2024-11-21 00:17:47,232 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@f1da57d 2024-11-21 00:17:47,275 main DEBUG Took 0.039117 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-21 00:17:47,281 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-21 00:17:47,283 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-21 00:17:47,286 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-21 00:17:47,288 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,306 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-21 00:17:47,326 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,328 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,329 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,330 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,330 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,331 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,334 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,335 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,335 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,336 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,337 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,338 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,338 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,339 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-21 00:17:47,340 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,340 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,341 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,341 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,342 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,342 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,343 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,343 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,344 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,344 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-21 00:17:47,345 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,345 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-21 00:17:47,346 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-21 00:17:47,348 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-21 00:17:47,351 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-21 00:17:47,352 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
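The layout plugin being assembled at this point is the console PatternLayout that formats every remaining line of this log (ISO8601 timestamp, level, thread, class and line number). A minimal Java sketch of building an equivalent layout with the Log4j 2 core API, assuming only the pattern string reported in the PatternLayout$Builder entry that follows; it is not the test harness's actual configuration code:

```java
// Minimal sketch (not the harness's actual code): building the same console pattern
// programmatically with the Log4j 2 core API. The pattern string is the one reported
// by PatternLayout$Builder in this log; everything else here is illustrative.
import org.apache.logging.log4j.core.layout.PatternLayout;

public final class PatternLayoutSketch {
  public static PatternLayout consoleLayout() {
    return PatternLayout.newBuilder()
        .withPattern("%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")
        .build();
  }
}
```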
2024-11-21 00:17:47,354 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-21 00:17:47,360 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-21 00:17:47,373 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-21 00:17:47,378 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-21 00:17:47,380 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-21 00:17:47,381 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-21 00:17:47,386 main DEBUG createAppenders(={Console}) 2024-11-21 00:17:47,388 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@f1da57d initialized 2024-11-21 00:17:47,388 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@f1da57d 2024-11-21 00:17:47,389 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@f1da57d OK. 2024-11-21 00:17:47,390 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-21 00:17:47,390 main DEBUG OutputStream closed 2024-11-21 00:17:47,393 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-21 00:17:47,393 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-21 00:17:47,393 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@43dac38f OK 2024-11-21 00:17:47,522 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-21 00:17:47,525 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-21 00:17:47,527 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-21 00:17:47,528 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-21 00:17:47,529 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-21 00:17:47,529 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-21 00:17:47,530 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-21 00:17:47,530 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-21 00:17:47,530 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-21 00:17:47,531 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-21 00:17:47,531 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-21 00:17:47,532 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-21 00:17:47,532 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-21 00:17:47,533 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-21 00:17:47,533 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-21 00:17:47,534 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-21 00:17:47,534 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-21 00:17:47,535 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-21 00:17:47,538 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-21 00:17:47,539 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@1b84f475) with optional ClassLoader: null 2024-11-21 00:17:47,539 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-21 00:17:47,540 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@1b84f475] started OK. 2024-11-21T00:17:47,560 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.replication.TestMasterReplication timeout: 13 mins 2024-11-21 00:17:47,563 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-21 00:17:47,564 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
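The HBaseClassTestRule line above ("timeout: 13 mins") is emitted by the JUnit class rule that HBase tests declare; the rule derives the per-class timeout from the test's size category. A hedged sketch of that boilerplate follows; the real test class is org.apache.hadoop.hbase.replication.TestMasterReplication, and the class name below is only a stand-in:

```java
// Hedged sketch of the standard HBase test-class boilerplate behind the
// "HBaseClassTestRule ... timeout: 13 mins" line. The class name is a stand-in;
// the category-to-timeout mapping is handled inside HBaseClassTestRule itself.
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.junit.ClassRule;

public class TestMasterReplicationSketch {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestMasterReplicationSketch.class);

  // test methods would follow; the rule enforces the per-class timeout logged above
}
```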
2024-11-21T00:17:47,648 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testBasePeerConfigsForReplicationPeer Thread=11, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=1012, ProcessCount=11, AvailableMemoryMB=2474 2024-11-21T00:17:48,059 INFO [Time-limited test {}] replication.TestMasterReplication(448): testBasePeerConfigsForPeerMutations 2024-11-21T00:17:48,130 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314 2024-11-21T00:17:48,135 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d, deleteOnExit=true 2024-11-21T00:17:48,416 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d/zookeeper_0, clientPort=50128, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:17:48,452 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50128 2024-11-21T00:17:48,453 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:17:48,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:17:48,483 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/test.cache.data in system properties and HBase conf 2024-11-21T00:17:48,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:17:48,484 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:17:48,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:17:48,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:17:48,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:17:48,609 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-21T00:17:48,841 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:17:48,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:17:48,857 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:17:48,858 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:17:48,862 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:17:48,863 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:17:48,865 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:17:48,866 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:17:48,868 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:17:48,869 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:17:48,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:17:48,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:17:48,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:17:48,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:17:48,887 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:17:50,658 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-21T00:17:50,785 INFO [Time-limited test {}] log.Log(170): Logging initialized @5333ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-21T00:17:50,932 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:17:51,060 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:17:51,144 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:17:51,144 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:17:51,146 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:17:51,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:17:51,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65423e7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:17:51,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22c42bf7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:17:51,522 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4219126d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/java.io.tmpdir/jetty-localhost-41103-hadoop-hdfs-3_4_1-tests_jar-_-any-15224481944735701879/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:17:51,532 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@63a6952a{HTTP/1.1, (http/1.1)}{localhost:41103} 2024-11-21T00:17:51,533 INFO [Time-limited test {}] server.Server(415): Started @6082ms 2024-11-21T00:17:52,372 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:17:52,384 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:17:52,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:17:52,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:17:52,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:17:52,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a66a1cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:17:52,396 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1edffc5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:17:52,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@318ae30c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/java.io.tmpdir/jetty-localhost-33407-hadoop-hdfs-3_4_1-tests_jar-_-any-4509197961703956891/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:17:52,549 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@60268925{HTTP/1.1, (http/1.1)}{localhost:33407} 2024-11-21T00:17:52,550 INFO [Time-limited test {}] server.Server(415): Started @7099ms 2024-11-21T00:17:52,622 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:17:54,409 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d/data/data1/current/BP-2095928671-172.17.0.2-1732148269835/current, will proceed with Du for space computation calculation, 2024-11-21T00:17:54,411 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d/data/data2/current/BP-2095928671-172.17.0.2-1732148269835/current, will proceed with Du for space computation calculation, 2024-11-21T00:17:54,509 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:17:54,595 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae1119eaf147ef68 with lease ID 0x258ce6c409d821a7: Processing first storage report for DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09 from datanode DatanodeRegistration(127.0.0.1:44135, datanodeUuid=db4e7784-de1d-4f93-a596-ea21b9cdc983, infoPort=39931, infoSecurePort=0, ipcPort=38787, storageInfo=lv=-57;cid=testClusterID;nsid=969774727;c=1732148269835) 2024-11-21T00:17:54,597 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae1119eaf147ef68 with lease ID 0x258ce6c409d821a7: from storage DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09 node DatanodeRegistration(127.0.0.1:44135, datanodeUuid=db4e7784-de1d-4f93-a596-ea21b9cdc983, infoPort=39931, infoSecurePort=0, ipcPort=38787, storageInfo=lv=-57;cid=testClusterID;nsid=969774727;c=1732148269835), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-21T00:17:54,598 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae1119eaf147ef68 with lease ID 0x258ce6c409d821a7: Processing first storage report for DS-34613727-23c3-41c9-a383-621aca807b73 from datanode DatanodeRegistration(127.0.0.1:44135, datanodeUuid=db4e7784-de1d-4f93-a596-ea21b9cdc983, infoPort=39931, infoSecurePort=0, ipcPort=38787, storageInfo=lv=-57;cid=testClusterID;nsid=969774727;c=1732148269835) 2024-11-21T00:17:54,599 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae1119eaf147ef68 with lease ID 0x258ce6c409d821a7: from storage DS-34613727-23c3-41c9-a383-621aca807b73 node DatanodeRegistration(127.0.0.1:44135, datanodeUuid=db4e7784-de1d-4f93-a596-ea21b9cdc983, infoPort=39931, infoSecurePort=0, ipcPort=38787, storageInfo=lv=-57;cid=testClusterID;nsid=969774727;c=1732148269835), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:17:54,654 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314 
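At this point ZooKeeper, the NameNode and the DataNode are up, which is the topology that HBaseTestingUtil's StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1} requested earlier in this log. A minimal sketch, assuming the HBase 3.x test utility API named in the log (HBaseTestingUtil, StartMiniClusterOption) and not the actual TestMasterReplication source, of how a test drives that startup:

```java
// Minimal sketch, assuming the HBaseTestingUtil / StartMiniClusterOption API named in
// this log; not the actual TestMasterReplication source. It starts the same
// 1-master / 1-regionserver / 1-datanode / 1-ZK topology logged above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up mini ZK, mini DFS, master and regionserver
    try {
      // test body would run against the mini cluster here
    } finally {
      util.shutdownMiniCluster();    // tears everything down and cleans the test data dir
    }
  }
}
```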
2024-11-21T00:17:54,688 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:17:54,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:17:55,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:17:55,552 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b with version=8 2024-11-21T00:17:55,553 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/hbase-staging 2024-11-21T00:17:55,683 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-21T00:17:55,989 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:17:56,002 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:17:56,003 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:17:56,010 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:17:56,010 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:17:56,010 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:17:56,235 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:17:56,316 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-21T00:17:56,328 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-21T00:17:56,333 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:17:56,364 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 11172 (auto-detected) 2024-11-21T00:17:56,365 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-21T00:17:56,389 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42433 2024-11-21T00:17:56,423 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=master:42433 connecting to ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:17:56,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:424330x0, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:17:56,555 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42433-0x1015ac108c20000 connected 2024-11-21T00:17:56,700 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:17:56,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:17:56,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:17:56,728 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b, hbase.cluster.distributed=false 2024-11-21T00:17:56,767 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/acl 2024-11-21T00:17:56,785 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42433 2024-11-21T00:17:56,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42433 2024-11-21T00:17:56,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42433 2024-11-21T00:17:56,828 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42433 2024-11-21T00:17:56,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42433 2024-11-21T00:17:57,003 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:17:57,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:17:57,007 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:17:57,008 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:17:57,008 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:17:57,009 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:17:57,014 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:17:57,018 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:17:57,019 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44897 2024-11-21T00:17:57,030 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44897 connecting to ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:17:57,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:17:57,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:17:57,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:448970x0, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:17:57,105 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:448970x0, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:17:57,112 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:17:57,113 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44897-0x1015ac108c20001 connected 2024-11-21T00:17:57,135 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:17:57,138 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/master 2024-11-21T00:17:57,144 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/acl 2024-11-21T00:17:57,148 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44897 2024-11-21T00:17:57,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44897 2024-11-21T00:17:57,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44897 2024-11-21T00:17:57,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44897 2024-11-21T00:17:57,156 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44897 2024-11-21T00:17:57,172 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:42433 2024-11-21T00:17:57,176 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0857133414/backup-masters/5ed4808ef0e6,42433,1732148275776 2024-11-21T00:17:57,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:17:57,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:17:57,198 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on existing znode=/0857133414/backup-masters/5ed4808ef0e6,42433,1732148275776 2024-11-21T00:17:57,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0857133414/master 2024-11-21T00:17:57,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:57,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:57,240 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on existing znode=/0857133414/master 2024-11-21T00:17:57,241 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0857133414/backup-masters/5ed4808ef0e6,42433,1732148275776 from backup master directory 2024-11-21T00:17:57,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:17:57,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/backup-masters/5ed4808ef0e6,42433,1732148275776 2024-11-21T00:17:57,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:17:57,258 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:17:57,258 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,42433,1732148275776 2024-11-21T00:17:57,265 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-21T00:17:57,267 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-21T00:17:57,343 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/hbase.id] with ID: e99aa0aa-6523-4535-af8b-0f39a4d084f3 2024-11-21T00:17:57,343 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/.tmp/hbase.id 2024-11-21T00:17:57,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:17:57,358 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/.tmp/hbase.id]:[hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/hbase.id] 2024-11-21T00:17:57,416 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:17:57,423 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:17:57,450 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 25ms. 
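The three FSUtils entries above publish the cluster ID by first writing it under .tmp and then renaming it into place, so a reader never observes a half-written hbase.id. A minimal sketch of that write-then-rename pattern with the Hadoop FileSystem API; the helper name, on-disk encoding and error handling are illustrative, and HBase's real logic lives in org.apache.hadoop.hbase.util.FSUtils as referenced in the log:

```java
// Hedged sketch of the ".tmp then rename" publish pattern described by the
// FSUtils(620)/(625)/(634) lines above. Helper name, encoding and error handling
// are illustrative; HBase's real implementation is in org.apache.hadoop.hbase.util.FSUtils.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdPublishSketch {
  public static void publish(Configuration conf, Path rootDir, String clusterId)
      throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
    Path dst = new Path(rootDir, "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8)); // write the ID to a temp file first
    }
    if (!fs.rename(tmp, dst)) {                              // then move it to its final location
      throw new IOException("Failed to rename " + tmp + " to " + dst);
    }
  }
}
```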
2024-11-21T00:17:57,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:57,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:57,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:17:57,484 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:17:57,487 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:17:57,515 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] 
at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:17:57,521 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:17:57,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:17:57,602 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store 2024-11-21T00:17:57,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:17:58,038 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. 
Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-21T00:17:58,043 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:17:58,044 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:17:58,044 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:17:58,045 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:17:58,047 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:17:58,047 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:17:58,047 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:17:58,048 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148278044Disabling compacts and flushes for region at 1732148278044Disabling writes for close at 1732148278047 (+3 ms)Writing region close event to WAL at 1732148278047Closed at 1732148278047 2024-11-21T00:17:58,051 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/.initializing 2024-11-21T00:17:58,051 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776 2024-11-21T00:17:58,060 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:17:58,079 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C42433%2C1732148275776, suffix=, logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/oldWALs, maxLogs=10 2024-11-21T00:17:58,111 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085, exclude list is [], retry=0 2024-11-21T00:17:58,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 
127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:17:58,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-21T00:17:58,178 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 2024-11-21T00:17:58,179 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:17:58,180 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:17:58,180 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:17:58,184 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:17:58,271 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:58,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:17:58,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:17:58,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:58,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:17:58,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:17:58,288 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:58,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:17:58,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,295 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:17:58,296 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:58,297 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:17:58,298 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,303 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,305 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,312 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,313 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,319 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
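The FlushLargeStoresPolicy message above describes a fallback: since hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the master:store table, the per-family flush lower bound is derived from the region's memstore flush size divided by its number of column families. A minimal sketch of that arithmetic, using the four families instantiated above (info, proc, rs, state) and the 128 MB flush size logged in the entries just below; the class and variable names are illustrative, not HBase internals:

    public class PerFamilyFlushLowerBoundSketch {
        public static void main(String[] args) {
            long memstoreFlushSize = 134217728L; // 128 MB, matching the flushSize logged below
            int columnFamilies = 4;              // info, proc, rs and state, created above
            long lowerBound = memstoreFlushSize / columnFamilies;
            System.out.println(lowerBound);      // 33554432 bytes = 32 MB, matching "(32.0 M)" above
        }
    }

This is the same 33554432 value that appears as flushSizeLowerBound in the region-open entry that follows.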
2024-11-21T00:17:58,331 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:17:58,339 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:17:58,341 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70823604, jitterRate=0.05535393953323364}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:17:58,352 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148278202Initializing all the Stores at 1732148278205 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148278205Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148278206 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148278206Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148278207 (+1 ms)Cleaning up temporary data from old regions at 1732148278313 (+106 ms)Region opened successfully at 1732148278352 (+39 ms) 2024-11-21T00:17:58,353 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:17:58,399 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f9449ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:17:58,432 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
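For reference, the desiredMaxFileSize logged here is consistent with a 64 MB (67,108,864-byte) base adjusted by the logged jitterRate: 67108864 × (1 + 0.05535393953…) ≈ 70823604. The later hbase:meta open shows the same relation with a negative jitter, 67108864 × (1 − 0.08187931776…) ≈ 61614036. The 64 MB base is inferred from these two numbers; it is not stated anywhere in the log.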
2024-11-21T00:17:58,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:17:58,446 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:17:58,449 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:17:58,451 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-21T00:17:58,457 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-21T00:17:58,457 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:17:58,494 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T00:17:58,507 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/balancer because node does not exist (not necessarily an error) 2024-11-21T00:17:58,523 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/balancer already deleted, retry=false 2024-11-21T00:17:58,526 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:17:58,528 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:17:58,540 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/normalizer already deleted, retry=false 2024-11-21T00:17:58,543 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:17:58,553 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:17:58,563 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/switch/split already deleted, retry=false 2024-11-21T00:17:58,566 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:17:58,573 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/switch/merge already deleted, retry=false 2024-11-21T00:17:58,597 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:17:58,605 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/snapshot-cleanup already deleted, retry=false 2024-11-21T00:17:58,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0857133414/running 2024-11-21T00:17:58,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0857133414/running 2024-11-21T00:17:58,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:58,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:58,621 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,42433,1732148275776, sessionid=0x1015ac108c20000, setting cluster-up flag (Was=false) 2024-11-21T00:17:58,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:58,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:58,692 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0857133414/flush-table-proc/acquired, /0857133414/flush-table-proc/reached, /0857133414/flush-table-proc/abort 2024-11-21T00:17:58,697 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42433,1732148275776 2024-11-21T00:17:58,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:58,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:58,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0857133414/online-snapshot/acquired, /0857133414/online-snapshot/reached, /0857133414/online-snapshot/abort 2024-11-21T00:17:58,760 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42433,1732148275776 
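The ZKUtil messages above show the master probing optional znodes (balancer, normalizer, switch/split, switch/merge, snapshot-cleanup) under /0857133414 and treating a missing node as the absence of stored state rather than an error. A minimal sketch of that pattern with the plain ZooKeeper client, reusing the quorum address and base path from the lines above; this is an illustration, not HBase's ZKUtil, and the 30-second session timeout is an arbitrary choice:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public class OptionalZnodeProbe {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:50128", 30_000, event -> { });
            String path = "/0857133414/balancer";
            try {
                byte[] data = zk.getData(path, false, null);
                System.out.println(path + " holds " + data.length + " bytes");
            } catch (KeeperException.NoNodeException e) {
                // Mirrors the DEBUG lines above: a missing node just means no override was stored.
                System.out.println(path + " does not exist (not necessarily an error)");
            } finally {
                zk.close();
            }
        }
    }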
2024-11-21T00:17:58,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:17:58,874 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(746): ClusterId : e99aa0aa-6523-4535-af8b-0f39a4d084f3 2024-11-21T00:17:58,877 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:17:58,918 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:17:58,919 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:17:58,921 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:17:58,935 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:17:58,935 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34241726, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:17:58,936 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:17:58,944 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:17:58,952 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,42433,1732148275776 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:17:58,972 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:44897 2024-11-21T00:17:58,977 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:17:58,978 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:17:58,985 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:17:58,989 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,42433,1732148275776 with port=44897, startcode=1732148276946 2024-11-21T00:17:58,995 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:17:58,996 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:17:58,996 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:17:58,996 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:17:59,001 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:17:59,001 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,001 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:17:59,002 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,016 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:17:59,035 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148309035 2024-11-21T00:17:59,040 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:17:59,043 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:17:59,043 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:17:59,043 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:17:59,057 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:17:59,058 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:17:59,058 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
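Each executor.ExecutorService line above starts a named, fixed-size pool with corePoolSize equal to maxPoolSize. A minimal equivalent with java.util.concurrent, sized like the MASTER_OPEN_REGION pool above; this is a simplified sketch, not HBase's ExecutorService wrapper, and the keep-alive value is an arbitrary choice:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class NamedFixedPoolSketch {
        public static void main(String[] args) {
            // corePoolSize=5, maxPoolSize=5, mirroring the MASTER_OPEN_REGION pool above.
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                5, 5, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            pool.allowCoreThreadTimeOut(true); // lets an idle pool shrink back down
            pool.execute(() -> System.out.println("task runs on " + Thread.currentThread().getName()));
            pool.shutdown();
        }
    }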
2024-11-21T00:17:59,058 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:17:59,058 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:17:59,058 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:17:59,073 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,099 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42379, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:17:59,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:17:59,118 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:17:59,119 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:17:59,124 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:17:59,126 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42433 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:17:59,160 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:17:59,161 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:17:59,165 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148279163,5,FailOnTimeoutGroup] 2024-11-21T00:17:59,170 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148279165,5,FailOnTimeoutGroup] 2024-11-21T00:17:59,170 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,170 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:17:59,172 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,172 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,174 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-21T00:17:59,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:17:59,175 WARN [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
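The exchange above, where the master's RPC handler rejects regionServerStartup with ServerNotRunningYetException while the region server logs "reportForDuty failed; sleeping 100 ms and then retrying", is a simple retry handshake. A compact sketch of that loop; the Master interface and the stub in main are hypothetical stand-ins, not HBase classes:

    public class ReportForDutyRetrySketch {
        // Hypothetical stand-in for the regionServerStartup RPC seen in the stack trace above.
        interface Master { void regionServerStartup() throws Exception; }

        static int reportForDuty(Master master) throws InterruptedException {
            int attempts = 0;
            while (true) {
                attempts++;
                try {
                    master.regionServerStartup();
                    return attempts;            // registered successfully
                } catch (Exception serverNotRunningYet) {
                    Thread.sleep(100);          // the same 100 ms pause as the WARN above
                }
            }
        }

        public static void main(String[] args) throws InterruptedException {
            int[] rejections = {0};
            // Stub master that, like the not-yet-active master above, rejects the first two attempts.
            int attempts = reportForDuty(() -> {
                if (rejections[0]++ < 2) throw new IllegalStateException("Server is not running yet");
            });
            System.out.println("registered after " + attempts + " attempts");
        }
    }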
2024-11-21T00:17:59,180 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:17:59,181 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b 2024-11-21T00:17:59,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:17:59,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:17:59,252 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:17:59,260 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:17:59,260 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:59,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:17:59,263 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:17:59,272 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:17:59,272 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:59,274 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:17:59,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:17:59,278 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,42433,1732148275776 with port=44897, startcode=1732148276946 2024-11-21T00:17:59,281 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42433 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,44897,1732148276946 2024-11-21T00:17:59,284 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42433 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,44897,1732148276946 2024-11-21T00:17:59,286 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:17:59,286 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:59,288 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:17:59,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:17:59,297 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b 2024-11-21T00:17:59,312 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41721 2024-11-21T00:17:59,312 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:17:59,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:17:59,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:17:59,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:17:59,321 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:17:59,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740 2024-11-21T00:17:59,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740 2024-11-21T00:17:59,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/rs 2024-11-21T00:17:59,325 DEBUG [RS:0;5ed4808ef0e6:44897 {}] 
zookeeper.ZKUtil(111): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on existing znode=/0857133414/rs/5ed4808ef0e6,44897,1732148276946 2024-11-21T00:17:59,325 WARN [RS:0;5ed4808ef0e6:44897 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:17:59,326 INFO [RS:0;5ed4808ef0e6:44897 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:17:59,326 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946 2024-11-21T00:17:59,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:17:59,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:17:59,330 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:17:59,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:17:59,338 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,44897,1732148276946] 2024-11-21T00:17:59,364 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:17:59,365 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61614036, jitterRate=-0.08187931776046753}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:17:59,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148279242Initializing all the Stores at 1732148279245 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148279245Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148279249 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148279249Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148279249Cleaning up temporary data from old regions at 1732148279328 (+79 ms)Region opened successfully at 1732148279373 (+45 ms) 2024-11-21T00:17:59,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:17:59,374 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:17:59,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:17:59,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:17:59,374 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:17:59,376 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:17:59,376 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148279373Disabling compacts and flushes for region at 1732148279373Disabling writes for close at 1732148279374 (+1 ms)Writing region close event to WAL at 1732148279375 (+1 ms)Closed at 1732148279376 (+1 ms) 2024-11-21T00:17:59,381 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:17:59,381 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:17:59,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:17:59,407 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:17:59,413 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:17:59,421 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:17:59,451 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:17:59,457 INFO [RS:0;5ed4808ef0e6:44897 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:17:59,458 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
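The MemStoreFlusher line above is internally consistent: the low-water mark is 95% of the global limit (880 MB × 0.95 = 836 MB), which matches HBase's default lower-limit fraction of 0.95 (hbase.regionserver.global.memstore.size.lower.limit), assuming that default is in effect for this test run.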
2024-11-21T00:17:59,460 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:17:59,473 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:17:59,475 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,476 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,476 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,477 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,477 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,477 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,477 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:17:59,477 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,477 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,478 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,478 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,479 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,488 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:17:59,489 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:17:59,489 DEBUG [RS:0;5ed4808ef0e6:44897 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:17:59,497 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T00:17:59,498 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,499 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,499 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,499 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,499 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44897,1732148276946-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:17:59,542 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:17:59,545 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44897,1732148276946-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,546 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:17:59,547 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.Replication(171): 5ed4808ef0e6,44897,1732148276946 started 2024-11-21T00:17:59,565 WARN [5ed4808ef0e6:42433 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-21T00:17:59,579 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T00:17:59,580 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,44897,1732148276946, RpcServer on 5ed4808ef0e6/172.17.0.2:44897, sessionid=0x1015ac108c20001 2024-11-21T00:17:59,581 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:17:59,582 DEBUG [RS:0;5ed4808ef0e6:44897 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,44897,1732148276946 2024-11-21T00:17:59,582 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,44897,1732148276946' 2024-11-21T00:17:59,582 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0857133414/flush-table-proc/abort' 2024-11-21T00:17:59,585 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0857133414/flush-table-proc/acquired' 2024-11-21T00:17:59,586 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:17:59,586 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:17:59,587 DEBUG [RS:0;5ed4808ef0e6:44897 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,44897,1732148276946 2024-11-21T00:17:59,587 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,44897,1732148276946' 2024-11-21T00:17:59,587 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0857133414/online-snapshot/abort' 2024-11-21T00:17:59,589 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0857133414/online-snapshot/acquired' 2024-11-21T00:17:59,590 DEBUG [RS:0;5ed4808ef0e6:44897 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:17:59,590 INFO [RS:0;5ed4808ef0e6:44897 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:17:59,590 INFO [RS:0;5ed4808ef0e6:44897 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-21T00:17:59,700 INFO [RS:0;5ed4808ef0e6:44897 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:17:59,740 INFO [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C44897%2C1732148276946, suffix=, logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs, maxLogs=10 2024-11-21T00:17:59,762 DEBUG [RS:0;5ed4808ef0e6:44897 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743, exclude list is [], retry=0 2024-11-21T00:17:59,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:17:59,774 INFO [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 2024-11-21T00:17:59,775 DEBUG [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:17:59,817 DEBUG [5ed4808ef0e6:42433 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:17:59,831 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,44897,1732148276946 2024-11-21T00:17:59,841 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,44897,1732148276946, state=OPENING 2024-11-21T00:17:59,889 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:17:59,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:59,900 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:17:59,902 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:17:59,902 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:17:59,906 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:17:59,909 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, 
state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,44897,1732148276946}] 2024-11-21T00:18:00,093 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:18:00,103 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36887, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:18:00,127 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:18:00,127 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:00,129 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:18:00,135 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C44897%2C1732148276946.meta, suffix=.meta, logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs, maxLogs=10 2024-11-21T00:18:00,162 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.meta.1732148280137.meta, exclude list is [], retry=0 2024-11-21T00:18:00,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:18:00,175 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.meta.1732148280137.meta 2024-11-21T00:18:00,178 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:18:00,178 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:00,193 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
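The WAL configuration lines above (blocksize=20 KB, rollsize=10 KB, maxLogs=10, and the same again for the .meta WAL) reflect deliberately tiny test settings so that log rolling is exercised quickly. A hedged sketch of how such values are usually injected through the HBase Configuration follows; the property names are the commonly used ones and should be verified against the version in this log (3.0.0-beta-2-SNAPSHOT) rather than taken as a quote from this test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyWalSettings {
        public static Configuration tinyWalConf() {
            Configuration conf = HBaseConfiguration.create();
            // 20 KB WAL block size; the 10 KB roll size in the log is block size
            // times the roll multiplier, hence 0.5 here.
            conf.setLong("hbase.regionserver.hlog.blocksize", 20 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Cap of 10 live WAL files, matching maxLogs=10 above.
            conf.setInt("hbase.regionserver.maxlogs", 10);
            return conf;
        }
    }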
2024-11-21T00:18:00,197 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:00,201 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:18:00,204 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:18:00,217 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:18:00,218 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:00,218 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:18:00,219 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:18:00,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:00,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:00,226 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:00,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:00,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:00,242 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:00,242 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:00,248 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:00,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:00,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:00,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:00,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:00,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:00,264 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:00,264 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:00,268 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:00,269 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:00,272 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740 2024-11-21T00:18:00,277 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740 2024-11-21T00:18:00,284 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:00,284 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:00,286 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
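The FlushLargeStoresPolicy line above is simple arithmetic: hbase:meta carries four column families (info, ns, rep_barrier, table), so with no per-family lower bound set the policy falls back to the region memstore flush size divided by the family count, which is the 32.0 M reported here and the flushSizeLowerBound=33554432 printed a few entries later. The derivation, spelled out:

    public class FlushLowerBoundCheck {
        public static void main(String[] args) {
            long memstoreFlushSize = 128L * 1024 * 1024; // 128 MB region flush size implied by the log (4 x 32 MB)
            int families = 4;                            // info, ns, rep_barrier, table
            long lowerBound = memstoreFlushSize / families;
            System.out.println(lowerBound);              // 33554432, the value in the log
        }
    }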
2024-11-21T00:18:00,293 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:00,296 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59825245, jitterRate=-0.10853438079357147}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:00,297 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:18:00,300 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148280219Writing region info on filesystem at 1732148280220 (+1 ms)Initializing all the Stores at 1732148280222 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148280222Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148280223 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148280224 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148280224Cleaning up temporary data from old regions at 1732148280285 (+61 ms)Running coprocessor post-open hooks at 1732148280298 (+13 ms)Region opened successfully at 1732148280300 (+2 ms) 2024-11-21T00:18:00,315 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148280081 2024-11-21T00:18:00,334 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:18:00,335 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:18:00,338 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, 
regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,44897,1732148276946 2024-11-21T00:18:00,342 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,44897,1732148276946, state=OPEN 2024-11-21T00:18:00,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0857133414/meta-region-server 2024-11-21T00:18:00,396 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:00,396 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,44897,1732148276946 2024-11-21T00:18:00,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0857133414/meta-region-server 2024-11-21T00:18:00,401 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:00,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:18:00,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,44897,1732148276946 in 488 msec 2024-11-21T00:18:00,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:18:00,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0240 sec 2024-11-21T00:18:00,431 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:00,431 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:18:00,463 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:00,464 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44897,1732148276946, seqNum=-1] 2024-11-21T00:18:00,495 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:00,498 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54311, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:00,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.7160 sec 2024-11-21T00:18:00,564 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148280564, completionTime=-1 
2024-11-21T00:18:00,567 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:18:00,567 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:18:00,603 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:18:00,603 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148340603 2024-11-21T00:18:00,603 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148400603 2024-11-21T00:18:00,603 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 36 msec 2024-11-21T00:18:00,608 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42433,1732148275776-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:00,609 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42433,1732148275776-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:00,611 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42433,1732148275776-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:00,614 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:42433, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:00,618 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:00,620 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:00,626 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:18:00,674 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.412sec 2024-11-21T00:18:00,676 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:18:00,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:18:00,680 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:18:00,688 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
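Each "Chore ScheduledChore name=... is enabled" entry above is a periodic task registered with the master's ChoreService at the stated period (balancer, normalizer, catalog janitor, and so on). As a rough illustration of that pattern, and not code taken from HBase itself, a custom chore looks roughly like the sketch below; it would then be handed to ChoreService#scheduleChore, and the Stoppable wiring is simplified here.

    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Illustrative chore running every 60 s, like the ClusterStatusChore period above.
    public class HeartbeatChore extends ScheduledChore {
        public HeartbeatChore(Stoppable stopper) {
            super("HeartbeatChore", stopper, 60_000);
        }

        @Override
        protected void chore() {
            // Periodic work goes here; HBase's own chores produce log lines like the ones above.
            System.out.println("chore tick");
        }
    }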
2024-11-21T00:18:00,689 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:18:00,690 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42433,1732148275776-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:00,691 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42433,1732148275776-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:18:00,705 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:18:00,706 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:18:00,707 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42433,1732148275776-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:00,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32f4153c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:00,791 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-21T00:18:00,791 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-21T00:18:00,796 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42433,-1 for getting cluster id 2024-11-21T00:18:00,800 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:00,836 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e99aa0aa-6523-4535-af8b-0f39a4d084f3' 2024-11-21T00:18:00,841 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:00,842 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e99aa0aa-6523-4535-af8b-0f39a4d084f3" 2024-11-21T00:18:00,845 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24f079a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:00,845 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42433,-1] 2024-11-21T00:18:00,851 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:00,854 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:00,863 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39326, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
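The AbstractRpcClient and ClusterIdFetcher entries above are the client side of the test coming up: a connection-registry lookup against the master, then SIMPLE-authenticated stubs for ClientMetaService and ClientService. A minimal sketch of the standard client bootstrap that produces this kind of traffic follows; the timeout property is a common knob added as an assumption here, and the log's connectTO/readTO/writeTO values are internal RPC settings that are not being mapped onto it.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ClientBootstrap {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.rpc.timeout", 20_000); // hypothetical tuning, not read from this log
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
                // Touching hbase:meta triggers the registry and meta-location lookups
                // seen in the log (ClusterIdFetcher, ConnectionUtils, ClientService).
                System.out.println(meta.getDescriptor().getTableName());
            }
        }
    }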
2024-11-21T00:18:00,871 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e7f98cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:00,872 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:00,886 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44897,1732148276946, seqNum=-1] 2024-11-21T00:18:00,887 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:00,895 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48226, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:00,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,42433,1732148275776 2024-11-21T00:18:00,935 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:18:00,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:00,971 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015ac108c20002 connected 2024-11-21T00:18:01,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.log.dir so I do NOT create it in target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c 2024-11-21T00:18:01,018 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:18:01,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.tmp.dir so I do NOT create it in target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c 2024-11-21T00:18:01,018 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.tmp.dir Erasing configuration value by system value. 
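With the first mini cluster reported up, the test utility immediately begins provisioning a second one (see the StartMiniClusterOption entry just below). For orientation, the usual way a test drives this is sketched here; the builder method names mirror the fields printed in that option string but are written from memory, so treat them as an assumption rather than a quote from this test.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            StartMiniClusterOption option = StartMiniClusterOption.builder()
                .numMasters(1)        // numMasters=1 in the log
                .numRegionServers(1)  // numRegionServers=1
                .numDataNodes(1)      // numDataNodes=1
                .build();
            util.startMiniCluster(option); // emits the DFS/ZK/master startup seen in this log
            try {
                // ... run test assertions against util.getConnection() ...
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }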
2024-11-21T00:18:01,018 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c 2024-11-21T00:18:01,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:18:01,019 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/cluster_b1725b2c-39a7-23ea-8a4d-e181886c5c86, deleteOnExit=true 2024-11-21T00:18:01,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:18:01,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/test.cache.data in system properties and HBase conf 2024-11-21T00:18:01,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:18:01,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:18:01,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:18:01,020 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:18:01,022 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:18:01,022 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:18:01,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:01,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:01,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:18:01,023 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:01,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:18:01,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:18:01,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:01,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:01,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:18:01,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:18:01,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:18:01,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:01,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:18:01,025 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:18:01,443 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:01,452 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:01,456 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:01,457 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:01,457 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:18:01,458 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:01,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b608463{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:01,465 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d5127ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:01,635 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3a033f80{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/java.io.tmpdir/jetty-localhost-34251-hadoop-hdfs-3_4_1-tests_jar-_-any-1791466211882962305/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:01,637 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2993da97{HTTP/1.1, (http/1.1)}{localhost:34251} 2024-11-21T00:18:01,637 INFO [Time-limited test {}] server.Server(415): Started @16186ms 2024-11-21T00:18:02,178 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:02,184 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:02,192 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:02,193 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:02,193 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:18:02,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22b39d31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:02,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9b8f4a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:02,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c12bb3b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/java.io.tmpdir/jetty-localhost-42213-hadoop-hdfs-3_4_1-tests_jar-_-any-1163653481861598028/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:02,421 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@5d1ec4c5{HTTP/1.1, (http/1.1)}{localhost:42213} 2024-11-21T00:18:02,421 INFO [Time-limited test {}] server.Server(415): Started @16970ms 2024-11-21T00:18:02,423 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:18:03,630 WARN [Thread-196 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/cluster_b1725b2c-39a7-23ea-8a4d-e181886c5c86/data/data2/current/BP-755534061-172.17.0.2-1732148281066/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:03,630 WARN [Thread-195 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/cluster_b1725b2c-39a7-23ea-8a4d-e181886c5c86/data/data1/current/BP-755534061-172.17.0.2-1732148281066/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:03,686 WARN [Thread-183 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:18:03,699 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca8716fcda4e28c9 with lease ID 0x395744e166ef0cc3: Processing first storage report for DS-25b51043-85f4-453f-ae1d-6d6f73d64d7e from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4c360eb8-683a-449e-9eab-cc5a9758ef82, infoPort=46835, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=76796096;c=1732148281066) 2024-11-21T00:18:03,699 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca8716fcda4e28c9 with lease ID 0x395744e166ef0cc3: from storage DS-25b51043-85f4-453f-ae1d-6d6f73d64d7e node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4c360eb8-683a-449e-9eab-cc5a9758ef82, infoPort=46835, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=76796096;c=1732148281066), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:03,700 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xca8716fcda4e28c9 with lease ID 0x395744e166ef0cc3: Processing first storage report for DS-738d2b16-95ba-4bf7-983c-65722438ed90 from datanode DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4c360eb8-683a-449e-9eab-cc5a9758ef82, infoPort=46835, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=76796096;c=1732148281066) 2024-11-21T00:18:03,700 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xca8716fcda4e28c9 with lease ID 0x395744e166ef0cc3: from storage DS-738d2b16-95ba-4bf7-983c-65722438ed90 node DatanodeRegistration(127.0.0.1:32973, datanodeUuid=4c360eb8-683a-449e-9eab-cc5a9758ef82, infoPort=46835, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=76796096;c=1732148281066), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:03,791 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c 2024-11-21T00:18:03,792 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:03,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:18:04,213 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8 with version=8 2024-11-21T00:18:04,214 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/hbase-staging 2024-11-21T00:18:04,220 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:04,220 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:04,221 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:04,222 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:04,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:04,222 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:04,222 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:18:04,223 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:04,224 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36249 2024-11-21T00:18:04,225 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36249 connecting to ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:18:04,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:362490x0, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:04,269 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36249-0x1015ac108c20003 connected 2024-11-21T00:18:04,342 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:04,348 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:04,352 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on znode that does not yet exist, /1-1336725220/running 2024-11-21T00:18:04,353 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8, hbase.cluster.distributed=false 2024-11-21T00:18:04,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on znode that does not yet exist, /1-1336725220/acl 2024-11-21T00:18:04,355 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36249 2024-11-21T00:18:04,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36249 2024-11-21T00:18:04,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36249 2024-11-21T00:18:04,357 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36249 2024-11-21T00:18:04,357 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36249 2024-11-21T00:18:04,373 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:04,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:04,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:04,374 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:04,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:04,374 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:04,374 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:18:04,374 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:04,375 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41251 2024-11-21T00:18:04,377 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41251 connecting to 
ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:18:04,378 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:04,381 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:04,394 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412510x0, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:04,395 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:412510x0, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on znode that does not yet exist, /1-1336725220/running 2024-11-21T00:18:04,395 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41251-0x1015ac108c20004 connected 2024-11-21T00:18:04,395 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:18:04,397 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:18:04,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on znode that does not yet exist, /1-1336725220/master 2024-11-21T00:18:04,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on znode that does not yet exist, /1-1336725220/acl 2024-11-21T00:18:04,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41251 2024-11-21T00:18:04,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41251 2024-11-21T00:18:04,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41251 2024-11-21T00:18:04,401 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41251 2024-11-21T00:18:04,402 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41251 2024-11-21T00:18:04,414 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:36249 2024-11-21T00:18:04,414 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-1336725220/backup-masters/5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:04,440 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220/backup-masters 2024-11-21T00:18:04,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220/backup-masters 
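The RpcExecutor entries above show this second region server starting with only three default handlers and short call queues, which is mini-cluster sizing rather than a production default. A sketch of the configuration knobs that usually drive those numbers follows; the property names are standard HBase settings, but the exact mapping onto every queue printed above is inferred rather than stated by the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRpcPoolConf {
        public static Configuration smallRpcPool() {
            Configuration conf = HBaseConfiguration.create();
            // Three default handlers, as in "Instantiated default.FPBQ.Fifo ... handlerCount=3".
            conf.setInt("hbase.regionserver.handler.count", 3);
            // Likely knob behind the priority.RWQ.Fifo pool sizes shown above (an assumption).
            conf.setInt("hbase.regionserver.metahandler.count", 3);
            return conf;
        }
    }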
2024-11-21T00:18:04,444 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on existing znode=/1-1336725220/backup-masters/5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:04,457 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1336725220/master 2024-11-21T00:18:04,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:04,457 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:04,458 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on existing znode=/1-1336725220/master 2024-11-21T00:18:04,459 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-1336725220/backup-masters/5ed4808ef0e6,36249,1732148284219 from backup master directory 2024-11-21T00:18:04,468 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220/backup-masters 2024-11-21T00:18:04,468 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:18:04,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1336725220/backup-masters/5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:04,468 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:04,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220/backup-masters 2024-11-21T00:18:04,478 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/hbase.id] with ID: 706478ae-f456-4e84-b28f-48af54a5495c 2024-11-21T00:18:04,478 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/.tmp/hbase.id 2024-11-21T00:18:04,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:18:04,896 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/.tmp/hbase.id]:[hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/hbase.id] 2024-11-21T00:18:04,923 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:04,923 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:18:04,925 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
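The FSUtils steps above (write the cluster ID to .tmp/hbase.id, then move it to hbase.id) are the common write-to-temp-then-rename publish pattern on HDFS. A small sketch of the same pattern with the plain Hadoop FileSystem API; the shortened paths and the overwrite flag are illustrative assumptions, while the NameNode address and cluster ID value are copied from the log:

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishClusterId {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:35053"); // NameNode from the log above
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/demo/.tmp/hbase.id"); // illustrative stand-in for the .tmp location
        Path dst = new Path("/demo/hbase.id");      // illustrative stand-in for the final location

        // Write the ID to the temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("706478ae-f456-4e84-b28f-48af54a5495c".getBytes(StandardCharsets.UTF_8));
        }
        // ...then rename it into place, so readers never observe a partially written hbase.id.
        if (!fs.rename(tmp, dst)) {
            throw new IllegalStateException("rename failed: " + tmp + " -> " + dst);
        }
        fs.close();
    }
}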
2024-11-21T00:18:04,973 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:04,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:18:04,982 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:04,984 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:18:04,985 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:04,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:18:05,402 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store 2024-11-21T00:18:05,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:18:05,727 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:05,802 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:18:05,812 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:05,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:05,813 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:05,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:05,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:05,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:05,813 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
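The 'master:store' descriptor spelled out above is a set of per-column-family settings (VERSIONS, DATA_BLOCK_ENCODING, BLOOMFILTER, IN_MEMORY, BLOCKSIZE, ...). As a rough sketch of how the same knobs look in the public HBase client builder API, with attribute values copied from the 'info' family in the log and an illustrative table name (this is not how the master builds its local store region, just the equivalent client-side builders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
    public static void main(String[] args) {
        // 'info' family as logged: VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter,
        // IN_MEMORY=true, 8 KB block size.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .build();

        TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo")) // illustrative name, not master:store
                .setColumnFamily(info)
                .build();

        System.out.println(td);
    }
}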
2024-11-21T00:18:05,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148285813Disabling compacts and flushes for region at 1732148285813Disabling writes for close at 1732148285813Writing region close event to WAL at 1732148285813Closed at 1732148285813 2024-11-21T00:18:05,814 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/.initializing 2024-11-21T00:18:05,815 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/WALs/5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:05,816 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:05,819 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C36249%2C1732148284219, suffix=, logDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/WALs/5ed4808ef0e6,36249,1732148284219, archiveDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/oldWALs, maxLogs=10 2024-11-21T00:18:05,833 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/WALs/5ed4808ef0e6,36249,1732148284219/5ed4808ef0e6%2C36249%2C1732148284219.1732148285820, exclude list is [], retry=0 2024-11-21T00:18:05,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-25b51043-85f4-453f-ae1d-6d6f73d64d7e,DISK] 2024-11-21T00:18:05,841 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/WALs/5ed4808ef0e6,36249,1732148284219/5ed4808ef0e6%2C36249%2C1732148284219.1732148285820 2024-11-21T00:18:05,841 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46835:46835)] 2024-11-21T00:18:05,841 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:05,841 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:05,842 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,842 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,844 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,846 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:18:05,846 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:05,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:05,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:18:05,849 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:05,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:05,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:18:05,852 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:05,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:05,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,856 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:18:05,856 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:05,857 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:05,858 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,859 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,859 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,863 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:05,865 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:05,869 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:05,870 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65731073, jitterRate=-0.020530685782432556}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:05,871 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148285842Initializing all the Stores at 1732148285843 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148285843Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148285844 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148285844Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148285844Cleaning up temporary data from old regions at 1732148285862 (+18 ms)Region opened successfully at 1732148285870 (+8 ms) 2024-11-21T00:18:05,871 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:18:05,877 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d853825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:05,879 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:18:05,879 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:18:05,879 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:18:05,880 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:18:05,881 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:18:05,881 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:18:05,881 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:18:05,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:18:05,885 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Unable to get data of znode /1-1336725220/balancer because node does not exist (not necessarily an error) 2024-11-21T00:18:05,899 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1336725220/balancer already deleted, retry=false 2024-11-21T00:18:05,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:18:05,901 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Unable to get data of znode /1-1336725220/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:18:05,909 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1336725220/normalizer already deleted, retry=false 2024-11-21T00:18:05,910 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:18:05,912 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Unable to get data of znode /1-1336725220/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:18:05,920 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1336725220/switch/split already deleted, retry=false 2024-11-21T00:18:05,921 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Unable to get data of znode /1-1336725220/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:18:05,930 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1336725220/switch/merge already deleted, retry=false 2024-11-21T00:18:05,934 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Unable to get data of znode /1-1336725220/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:18:05,941 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1336725220/snapshot-cleanup already deleted, retry=false 2024-11-21T00:18:05,952 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1336725220/running 2024-11-21T00:18:05,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1336725220/running 2024-11-21T00:18:05,952 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:05,952 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:05,953 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,36249,1732148284219, sessionid=0x1015ac108c20003, setting cluster-up flag (Was=false) 2024-11-21T00:18:05,973 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:05,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:06,004 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1336725220/flush-table-proc/acquired, /1-1336725220/flush-table-proc/reached, /1-1336725220/flush-table-proc/abort 2024-11-21T00:18:06,006 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:06,025 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:06,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:06,057 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1336725220/online-snapshot/acquired, /1-1336725220/online-snapshot/reached, /1-1336725220/online-snapshot/abort 2024-11-21T00:18:06,060 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:06,063 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:18:06,067 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:06,068 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:18:06,068 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, 
MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:18:06,068 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,36249,1732148284219 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:18:06,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:06,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:06,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:06,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:06,072 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:18:06,072 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,072 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:06,072 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,073 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148316073 2024-11-21T00:18:06,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:18:06,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:18:06,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:18:06,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:18:06,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:18:06,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 
2024-11-21T00:18:06,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,075 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:18:06,075 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:18:06,075 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:06,075 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:18:06,076 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:18:06,076 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:18:06,076 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:18:06,076 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:18:06,077 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148286076,5,FailOnTimeoutGroup] 2024-11-21T00:18:06,077 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148286077,5,FailOnTimeoutGroup] 2024-11-21T00:18:06,077 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,077 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:18:06,078 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,078 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:06,078 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:06,078 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:18:06,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:18:06,106 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(746): ClusterId : 706478ae-f456-4e84-b28f-48af54a5495c 2024-11-21T00:18:06,106 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:18:06,121 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:18:06,121 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:18:06,132 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:18:06,132 DEBUG [RS:0;5ed4808ef0e6:41251 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19c691c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:06,144 DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:41251 2024-11-21T00:18:06,144 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:18:06,145 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:18:06,145 
DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:18:06,146 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,36249,1732148284219 with port=41251, startcode=1732148284373 2024-11-21T00:18:06,146 DEBUG [RS:0;5ed4808ef0e6:41251 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:18:06,148 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37007, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:18:06,149 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36249 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:06,149 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36249 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:06,151 DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8 2024-11-21T00:18:06,152 DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35053 2024-11-21T00:18:06,152 DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:18:06,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220/rs 2024-11-21T00:18:06,218 DEBUG [RS:0;5ed4808ef0e6:41251 {}] zookeeper.ZKUtil(111): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on existing znode=/1-1336725220/rs/5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:06,218 WARN [RS:0;5ed4808ef0e6:41251 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:18:06,218 INFO [RS:0;5ed4808ef0e6:41251 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:06,218 DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:06,218 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,41251,1732148284373] 2024-11-21T00:18:06,223 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:18:06,226 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:18:06,226 INFO [RS:0;5ed4808ef0e6:41251 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:18:06,226 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,226 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:18:06,228 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:18:06,228 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
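Both the master (earlier) and this region server log "Instantiating WALProvider of type class ...AsyncFSWALProvider". Which provider gets instantiated is a configuration choice; a minimal sketch of pinning it explicitly, assuming the hbase.wal.provider key documented in the HBase reference guide (verify the key and accepted values against the version in use):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects the AsyncFSWALProvider seen in the log; "filesystem" would select
        // the older FSHLog-based provider instead.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println("hbase.wal.provider=" + conf.get("hbase.wal.provider"));
    }
}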
2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,228 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,229 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,229 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,229 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:06,229 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:06,229 DEBUG [RS:0;5ed4808ef0e6:41251 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:06,229 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,229 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,229 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,229 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:06,230 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,230 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41251,1732148284373-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:06,248 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:18:06,249 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41251,1732148284373-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,249 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,249 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.Replication(171): 5ed4808ef0e6,41251,1732148284373 started 2024-11-21T00:18:06,269 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:06,269 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,41251,1732148284373, RpcServer on 5ed4808ef0e6/172.17.0.2:41251, sessionid=0x1015ac108c20004 2024-11-21T00:18:06,269 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:18:06,269 DEBUG [RS:0;5ed4808ef0e6:41251 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:06,269 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,41251,1732148284373' 2024-11-21T00:18:06,269 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1336725220/flush-table-proc/abort' 2024-11-21T00:18:06,270 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1336725220/flush-table-proc/acquired' 2024-11-21T00:18:06,271 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:18:06,271 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:18:06,271 DEBUG [RS:0;5ed4808ef0e6:41251 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:06,271 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,41251,1732148284373' 2024-11-21T00:18:06,271 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1336725220/online-snapshot/abort' 2024-11-21T00:18:06,272 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1336725220/online-snapshot/acquired' 2024-11-21T00:18:06,272 DEBUG [RS:0;5ed4808ef0e6:41251 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:18:06,272 INFO [RS:0;5ed4808ef0e6:41251 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:18:06,273 INFO [RS:0;5ed4808ef0e6:41251 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:18:06,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:18:06,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T00:18:06,311 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:06,312 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver Metrics about HBase RegionObservers 2024-11-21T00:18:06,312 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:06,312 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T00:18:06,313 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:18:06,313 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T00:18:06,374 INFO [RS:0;5ed4808ef0e6:41251 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:06,379 INFO [RS:0;5ed4808ef0e6:41251 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C41251%2C1732148284373, suffix=, logDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373, archiveDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/oldWALs, maxLogs=10 2024-11-21T00:18:06,397 DEBUG [RS:0;5ed4808ef0e6:41251 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373/5ed4808ef0e6%2C41251%2C1732148284373.1732148286382, exclude list is [], retry=0 2024-11-21T00:18:06,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-25b51043-85f4-453f-ae1d-6d6f73d64d7e,DISK] 2024-11-21T00:18:06,404 INFO [RS:0;5ed4808ef0e6:41251 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373/5ed4808ef0e6%2C41251%2C1732148284373.1732148286382 2024-11-21T00:18:06,404 DEBUG [RS:0;5ed4808ef0e6:41251 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46835:46835)] 2024-11-21T00:18:06,491 INFO [PEWorker-1 {}] 
util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:18:06,491 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8 2024-11-21T00:18:06,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:18:06,904 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:06,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:06,909 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:06,909 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:06,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:06,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:06,912 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:06,912 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:06,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:06,913 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:06,916 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:06,916 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:06,916 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-21T00:18:06,917 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:06,919 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:06,919 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:06,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:06,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:06,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740 2024-11-21T00:18:06,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740 2024-11-21T00:18:06,925 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:06,925 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:06,926 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:18:06,927 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:06,930 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:06,931 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68428526, jitterRate=0.019664496183395386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:06,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148286904Initializing all the Stores at 1732148286906 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148286906Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148286906Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148286906Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148286906Cleaning up temporary data from old regions at 1732148286925 (+19 ms)Region opened successfully at 1732148286932 (+7 ms) 2024-11-21T00:18:06,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:06,932 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:06,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:06,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:06,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:06,933 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:06,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148286932Disabling compacts and flushes for region at 1732148286932Disabling writes for close at 1732148286932Writing 
region close event to WAL at 1732148286933 (+1 ms)Closed at 1732148286933 2024-11-21T00:18:06,935 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:06,935 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:18:06,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:18:06,938 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:06,940 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:18:07,090 DEBUG [5ed4808ef0e6:36249 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:18:07,091 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:07,094 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,41251,1732148284373, state=OPENING 2024-11-21T00:18:07,099 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:18:07,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:07,109 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:07,110 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1336725220/meta-region-server: CHANGED 2024-11-21T00:18:07,110 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:07,110 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1336725220/meta-region-server: CHANGED 2024-11-21T00:18:07,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,41251,1732148284373}] 2024-11-21T00:18:07,265 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:18:07,269 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection 
from 172.17.0.2:50153, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:18:07,280 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:18:07,280 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:07,281 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:18:07,285 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C41251%2C1732148284373.meta, suffix=.meta, logDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373, archiveDir=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/oldWALs, maxLogs=10 2024-11-21T00:18:07,307 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373/5ed4808ef0e6%2C41251%2C1732148284373.meta.1732148287288.meta, exclude list is [], retry=0 2024-11-21T00:18:07,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32973,DS-25b51043-85f4-453f-ae1d-6d6f73d64d7e,DISK] 2024-11-21T00:18:07,315 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373/5ed4808ef0e6%2C41251%2C1732148284373.meta.1732148287288.meta 2024-11-21T00:18:07,316 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46835:46835)] 2024-11-21T00:18:07,316 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:07,317 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
2024-11-21T00:18:07,317 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:07,317 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:18:07,317 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:18:07,317 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:18:07,317 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:07,318 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:18:07,318 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:18:07,321 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:07,322 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:07,323 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:07,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:07,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:07,326 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:07,326 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:07,326 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:07,327 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:07,328 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:07,328 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:07,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:07,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:07,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:07,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:07,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:07,332 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:07,333 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740 2024-11-21T00:18:07,335 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740 2024-11-21T00:18:07,337 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:07,337 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:07,338 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:18:07,340 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:07,342 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63392870, jitterRate=-0.05537262558937073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:07,342 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:18:07,342 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148287318Writing region info on filesystem at 1732148287318Initializing all the Stores at 1732148287320 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148287320Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148287320Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148287320Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148287320Cleaning up temporary data from old regions at 1732148287337 (+17 ms)Running coprocessor post-open hooks at 1732148287342 (+5 ms)Region opened successfully at 1732148287342 2024-11-21T00:18:07,344 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148287264 2024-11-21T00:18:07,348 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:18:07,348 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:18:07,349 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:07,351 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,41251,1732148284373, state=OPEN 2024-11-21T00:18:07,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1336725220/meta-region-server 2024-11-21T00:18:07,362 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1336725220/meta-region-server 2024-11-21T00:18:07,362 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:07,362 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1336725220/meta-region-server: CHANGED 2024-11-21T00:18:07,362 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1336725220/meta-region-server: CHANGED 2024-11-21T00:18:07,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:18:07,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,41251,1732148284373 in 252 msec 2024-11-21T00:18:07,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:18:07,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 432 msec 2024-11-21T00:18:07,374 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:07,374 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:18:07,376 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:07,376 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,41251,1732148284373, seqNum=-1] 2024-11-21T00:18:07,377 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:07,379 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34539, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:07,389 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3210 sec 2024-11-21T00:18:07,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148287389, completionTime=-1 
2024-11-21T00:18:07,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:18:07,389 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:18:07,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:18:07,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148347392 2024-11-21T00:18:07,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148407392 2024-11-21T00:18:07,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:18:07,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36249,1732148284219-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:07,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36249,1732148284219-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:07,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36249,1732148284219-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:07,393 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:36249, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:07,393 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:07,393 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:07,396 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:18:07,399 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.931sec 2024-11-21T00:18:07,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:18:07,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:18:07,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:18:07,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:18:07,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:18:07,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36249,1732148284219-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:07,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36249,1732148284219-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:18:07,405 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:18:07,405 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:18:07,405 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36249,1732148284219-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:07,407 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cd1c23c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,407 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:07,407 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:07,409 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:07,410 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:07,410 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:07,411 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a6c3290, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,411 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:07,411 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:07,412 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:07,414 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41484, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:07,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bed972d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,416 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:07,418 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,41251,1732148284373, seqNum=-1] 2024-11-21T00:18:07,419 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:07,421 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:07,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:07,426 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:18:07,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:07,441 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015ac108c20005 connected 2024-11-21T00:18:07,460 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7751df50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,461 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42433,-1 for getting cluster id 2024-11-21T00:18:07,461 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:07,463 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e99aa0aa-6523-4535-af8b-0f39a4d084f3' 2024-11-21T00:18:07,463 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:07,463 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e99aa0aa-6523-4535-af8b-0f39a4d084f3" 2024-11-21T00:18:07,464 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4edb76e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,464 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42433,-1] 2024-11-21T00:18:07,464 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:07,467 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:07,469 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54704, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:07,471 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70eacf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,482 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:07,488 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42433,1732148275776 2024-11-21T00:18:07,491 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@69329681 2024-11-21T00:18:07,493 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:07,498 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54712, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:07,499 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:18:07,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:18:07,517 DEBUG [PEWorker-4 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36249' 2024-11-21T00:18:07,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:07,521 DEBUG [PEWorker-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fcde5d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,521 DEBUG [PEWorker-4 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:07,521 DEBUG [PEWorker-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:07,522 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:07,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:07,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:07,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cf986cb, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:07,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:07,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:07,526 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41504, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:07,528 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55bd5295, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:07,529 DEBUG [PEWorker-4 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:07,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:07,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@20573df7 2024-11-21T00:18:07,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:07,543 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41506, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:07,562 INFO [PEWorker-4 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-4. 
2024-11-21T00:18:07,563 DEBUG [PEWorker-4 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:18:07,569 DEBUG [PEWorker-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:07,570 DEBUG [PEWorker-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:07,570 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:18:07,573 INFO [PEWorker-4 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:07,580 DEBUG [PEWorker-4 {}] procedure2.ProcedureExecutor(1139): Stored pid=5, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:18:07,592 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:18:07,593 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:07,605 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:18:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:07,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741835_1011 (size=1138) 2024-11-21T00:18:07,634 DEBUG [PEWorker-4 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:18:07,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:08,030 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 16c2eddea6b943d19f79621ce6daf354, NAME => 'hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b 2024-11-21T00:18:08,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741836_1012 (size=44) 2024-11-21T00:18:08,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:08,445 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:08,445 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 16c2eddea6b943d19f79621ce6daf354, disabling compactions & flushes 2024-11-21T00:18:08,445 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:08,445 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:08,445 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. after waiting 0 ms 2024-11-21T00:18:08,445 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:08,445 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 
2024-11-21T00:18:08,445 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 16c2eddea6b943d19f79621ce6daf354: Waiting for close lock at 1732148288445Disabling compacts and flushes for region at 1732148288445Disabling writes for close at 1732148288445Writing region close event to WAL at 1732148288445Closed at 1732148288445 2024-11-21T00:18:08,448 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:18:08,453 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148288448"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148288448"}]},"ts":"1732148288448"} 2024-11-21T00:18:08,457 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:18:08,459 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:18:08,461 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148288459"}]},"ts":"1732148288459"} 2024-11-21T00:18:08,466 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:18:08,467 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN}] 2024-11-21T00:18:08,470 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN 2024-11-21T00:18:08,472 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,44897,1732148276946; forceNewPlan=false, retain=false 2024-11-21T00:18:08,623 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=6 updating hbase:meta row=16c2eddea6b943d19f79621ce6daf354, regionState=OPENING, regionLocation=5ed4808ef0e6,44897,1732148276946 2024-11-21T00:18:08,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN because future has completed 2024-11-21T00:18:08,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=7, ppid=6, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16c2eddea6b943d19f79621ce6daf354, server=5ed4808ef0e6,44897,1732148276946}] 2024-11-21T00:18:08,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:08,789 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:08,789 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:08,789 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:18:08,792 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C44897%2C1732148276946.rep, suffix=, logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs, maxLogs=10 2024-11-21T00:18:08,808 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.rep.1732148288792, exclude list is [], retry=0 2024-11-21T00:18:08,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:18:08,814 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.rep.1732148288792 2024-11-21T00:18:08,814 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:18:08,815 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7752): Opening region: {ENCODED => 16c2eddea6b943d19f79621ce6daf354, NAME => 'hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:08,815 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
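The WAL lines above show the region server instantiating AsyncFSWALProvider for this region's WAL with blocksize=20 KB, rollsize=10 KB and maxLogs=10, which are deliberately tiny test values. A hedged configuration sketch using what are assumed to be the stock region-server WAL settings (rollsize is taken to be blocksize multiplied by the roll multiplier, matching 20 KB * 0.5 = 10 KB):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Async fanout WAL implementation, matching the logged AsyncFSWALProvider.
    conf.set("hbase.wal.provider", "asyncfs");
    // Tiny test-sized WAL blocks; production defaults are far larger.
    conf.setLong("hbase.regionserver.hlog.blocksize", 20 * 1024);  // logged blocksize=20 KB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // rollsize = 0.5 * blocksize = 10 KB
    conf.setInt("hbase.regionserver.maxlogs", 10);                 // logged maxLogs=10
    System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
  }
}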
2024-11-21T00:18:08,815 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:08,815 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. service=MultiRowMutationService 2024-11-21T00:18:08,815 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:18:08,816 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,816 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:08,816 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7794): checking encryption for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,816 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7797): checking classloading for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,818 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,820 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16c2eddea6b943d19f79621ce6daf354 columnFamilyName hfileref 2024-11-21T00:18:08,820 DEBUG [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:08,820 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(327): Store=16c2eddea6b943d19f79621ce6daf354/hfileref, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:08,821 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,822 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16c2eddea6b943d19f79621ce6daf354 columnFamilyName queue 2024-11-21T00:18:08,822 DEBUG [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:08,823 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(327): Store=16c2eddea6b943d19f79621ce6daf354/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:08,823 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,825 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16c2eddea6b943d19f79621ce6daf354 columnFamilyName sid 2024-11-21T00:18:08,825 DEBUG [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:08,826 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(327): Store=16c2eddea6b943d19f79621ce6daf354/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:08,826 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1038): replaying wal for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,827 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,827 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,829 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1048): stopping wal replay for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,829 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1060): Cleaning up temporary data for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,830 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:18:08,832 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1093): writing seq id for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,834 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:08,835 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1114): Opened 16c2eddea6b943d19f79621ce6daf354; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64139975, jitterRate=-0.044239893555641174}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:18:08,835 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:08,836 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1006): Region open journal for 16c2eddea6b943d19f79621ce6daf354: Running coprocessor pre-open hook at 1732148288816Writing region info on filesystem at 1732148288816Initializing all the Stores at 1732148288817 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148288818 (+1 ms)Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148288818Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148288818Cleaning up temporary data from old regions at 1732148288829 (+11 ms)Running coprocessor post-open hooks at 1732148288835 (+6 ms)Region opened successfully at 1732148288836 (+1 ms) 2024-11-21T00:18:08,838 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354., pid=7, masterSystemTime=1732148288783 2024-11-21T00:18:08,841 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:08,841 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 
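The per-store CompactionConfiguration lines echo the stock compaction defaults: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, and a major compaction period of 604800000 ms (7 days) with 0.5 jitter. A sketch of how those numbers map onto the usual configuration keys, on the assumption that this cluster is simply running defaults here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);  // major period, 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);   // major jitter
    System.out.println("compaction ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
  }
}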
2024-11-21T00:18:08,842 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=6 updating hbase:meta row=16c2eddea6b943d19f79621ce6daf354, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,44897,1732148276946 2024-11-21T00:18:08,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, ppid=6, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16c2eddea6b943d19f79621ce6daf354, server=5ed4808ef0e6,44897,1732148276946 because future has completed 2024-11-21T00:18:08,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=7, resume processing ppid=6 2024-11-21T00:18:08,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, ppid=6, state=SUCCESS, hasLock=false; OpenRegionProcedure 16c2eddea6b943d19f79621ce6daf354, server=5ed4808ef0e6,44897,1732148276946 in 218 msec 2024-11-21T00:18:08,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:18:08,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN in 384 msec 2024-11-21T00:18:08,856 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:18:08,856 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148288856"}]},"ts":"1732148288856"} 2024-11-21T00:18:08,859 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:18:08,861 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:18:08,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 1.2870 sec 2024-11-21T00:18:08,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354., hostname=5ed4808ef0e6,44897,1732148276946, seqNum=2] 2024-11-21T00:18:08,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44897 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-21T00:18:08,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:08,930 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:08,965 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=4, state=RUNNABLE, hasLock=false; 
org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:18:09,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44897 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=8 2024-11-21T00:18:09,133 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:18:09,168 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,44897,1732148276946, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:18:09,169 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:09,170 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44897,1732148276946, seqNum=-1] 2024-11-21T00:18:09,170 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:09,171 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47375, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-11-21T00:18:09,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,44897,1732148276946', locateType=CURRENT is [region=hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354., hostname=5ed4808ef0e6,44897,1732148276946, seqNum=2] 2024-11-21T00:18:09,180 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-21T00:18:09,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-21T00:18:09,185 INFO [PEWorker-1 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,44897,1732148276946 suceeded 2024-11-21T00:18:09,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=4 2024-11-21T00:18:09,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 220 msec 2024-11-21T00:18:09,189 INFO [PEWorker-2 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:18:09,194 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.6870 sec 2024-11-21T00:18:09,201 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap 
servers='5ed4808ef0e6:36249' 2024-11-21T00:18:09,203 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1de1d83b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,203 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:09,204 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:09,205 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:09,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:09,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:09,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@21f9abfe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:09,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:09,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,207 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:09,208 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@4065d65d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,208 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:09,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:09,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7cfa2a48 2024-11-21T00:18:09,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:09,210 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=MasterService 2024-11-21T00:18:09,211 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,44897,1732148276946 (queues=1) is replicating from cluster=e99aa0aa-6523-4535-af8b-0f39a4d084f3 to cluster=706478ae-f456-4e84-b28f-48af54a5495c 2024-11-21T00:18:09,214 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C44897%2C1732148276946 2024-11-21T00:18:09,219 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,44897,1732148276946, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:18:09,221 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.shipper5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C44897%2C1732148276946 2024-11-21T00:18:09,226 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743, startPosition=0, beingWritten=true 2024-11-21T00:18:09,479 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:09,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:09,679 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
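ReplicationSourceWALReader above reports replicationBatchSizeCapacity=1024 and replicationBatchCountCapacity=25000 for peer 1's WAL reader. A sketch of the corresponding settings, assuming the 1 KB batch size was set explicitly for the test through replication.source.size.capacity (the stock default is 64 MB) while the entry-count capacity is left at its default:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ReplicationReaderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Maximum bytes of WAL entries batched per shipment; 1 KB here is a test-only value.
    conf.setLong("replication.source.size.capacity", 1024);
    // Maximum number of WAL entries batched per shipment.
    conf.setInt("replication.source.nb.capacity", 25000);
    System.out.println("batch size capacity = " + conf.getLong("replication.source.size.capacity", -1));
  }
}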
2024-11-21T00:18:09,679 INFO [RPCClient-NioEventLoopGroup-4-9 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:18:09,680 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:460) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:09,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,680 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
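The call stack above shows TestMasterReplication.addPeer closing its ConnectionOverAsyncConnection once the ADD_REPLICATION_PEER operation for peer 1 has completed. A hypothetical helper in the same spirit — not the test's actual code — where try-with-resources triggers exactly that close() path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  // Hypothetical helper: creates a connection, adds the peer, and lets try-with-resources
  // close the Connection/Admin, which is the close() seen in the stack trace above.
  static void addPeer(Configuration conf, String peerId, String clusterKey) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey(clusterKey)
          .setReplicateAllUserTables(true)
          .build();
      admin.addReplicationPeer(peerId, peerConfig);  // blocks until the AddPeerProcedure finishes
    }
  }

  public static void main(String[] args) throws Exception {
    // Placeholder peer id and cluster key; in the test these come from the peer mini-cluster.
    addPeer(HBaseConfiguration.create(), "1", "hbase+rpc://peer-master.example:16000");
  }
}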
2024-11-21T00:18:09,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13ac2555, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,682 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42433,-1 for getting cluster id 2024-11-21T00:18:09,682 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:09,684 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e99aa0aa-6523-4535-af8b-0f39a4d084f3' 2024-11-21T00:18:09,685 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:09,686 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e99aa0aa-6523-4535-af8b-0f39a4d084f3" 2024-11-21T00:18:09,686 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f41929, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,686 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42433,-1] 2024-11-21T00:18:09,687 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:09,687 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,689 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:09,690 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@652f0dc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,691 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:09,692 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42433,1732148275776 2024-11-21T00:18:09,692 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@738cd1be 2024-11-21T00:18:09,692 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:09,694 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:09,695 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, 
id=2, config=clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:18:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:18:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-21T00:18:09,699 DEBUG [PEWorker-3 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36249' 2024-11-21T00:18:09,701 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15344fbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,701 DEBUG [PEWorker-3 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:09,701 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:09,702 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:09,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:09,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:09,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@509a0c2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:09,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:09,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,705 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:09,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57564946, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,707 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:09,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] 
client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:09,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2ba3270a 2024-11-21T00:18:09,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:09,710 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41556, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:09,711 INFO [PEWorker-3 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-3. 2024-11-21T00:18:09,711 DEBUG [PEWorker-3 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:18:09,711 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,712 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,712 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
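Before the master stores peer 2, the stack above shows ReplicationPeerManager.checkClusterKey opening, and immediately closing, a connection to the peer cluster to validate the submitted configuration (clusterKey=hbase+rpc://5ed4808ef0e6:36249, replicateAllUserTables=true, bandwidth=0, serial=false). A field-for-field sketch of that peer config built through the public builder; the hbase+rpc:// URI form appears to select the RPC connection registry, consistent with the RpcConnectionRegistryURIFactory lines, and the host/port are just this test run's values:

import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigSketch {
  public static void main(String[] args) {
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("hbase+rpc://5ed4808ef0e6:36249")  // peer cluster's RPC bootstrap endpoint
        .setReplicateAllUserTables(true)                  // replicateAllUserTables=true
        .setBandwidth(0)                                  // bandwidth=0, i.e. unthrottled
        .setSerial(false)                                 // serial=false
        .build();
    // toString() renders roughly the same config string the master logs.
    System.out.println(peerConfig);
  }
}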
2024-11-21T00:18:09,774 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:18:09,790 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:09,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-21T00:18:09,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44897 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=10 2024-11-21T00:18:09,931 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=10}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=2, type=ADD_PEER 2024-11-21T00:18:09,960 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=10}] regionserver.ReplicationSource(231): queueId=2-5ed4808ef0e6,44897,1732148276946, ReplicationSource: 2, currentBandwidth=0 2024-11-21T00:18:09,963 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-21T00:18:09,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-21T00:18:09,967 INFO [PEWorker-5 {}] replication.RefreshPeerProcedure(132): Refresh peer 2 for ADD on 5ed4808ef0e6,44897,1732148276946 suceeded 2024-11-21T00:18:09,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:18:09,970 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 193 msec 2024-11-21T00:18:09,970 INFO [PEWorker-1 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 2, config clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:18:09,973 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 275 msec 2024-11-21T00:18:09,981 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36249' 2024-11-21T00:18:09,983 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@42948875, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,983 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:09,983 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:09,984 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:09,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:09,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:09,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@4cb1d92f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:09,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:09,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:09,986 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41572, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:09,986 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@373998e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:09,987 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:09,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:09,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@38825ea6 2024-11-21T00:18:09,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:09,989 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=MasterService 2024-11-21T00:18:09,989 INFO 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSource(613): peerId=2, queueId=2-5ed4808ef0e6,44897,1732148276946 (queues=1) is replicating from cluster=e99aa0aa-6523-4535-af8b-0f39a4d084f3 to cluster=706478ae-f456-4e84-b28f-48af54a5495c 2024-11-21T00:18:09,990 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSource(382): peerId=2, starting shipping worker for walGroupId=5ed4808ef0e6%2C44897%2C1732148276946 2024-11-21T00:18:09,990 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=2-5ed4808ef0e6,44897,1732148276946, ReplicationSourceWALReaderThread : 2 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:18:09,990 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.shipper5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C44897%2C1732148276946 2024-11-21T00:18:09,990 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743, startPosition=0, beingWritten=true 2024-11-21T00:18:10,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-21T00:18:10,019 INFO [RPCClient-NioEventLoopGroup-4-12 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 2 completed 2024-11-21T00:18:10,019 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
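The "Operation: ADD_REPLICATION_PEER, peerId: 2 completed" line is emitted by the asynchronous admin (RawAsyncHBaseAdmin) when the procedure's future completes. A sketch of that asynchronous client path, with a placeholder peer endpoint:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AsyncAddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://peer-master.example:16000")  // placeholder endpoint
          .build();
      // addReplicationPeer returns a CompletableFuture; the "completed" log line above is
      // written by the async admin once this future finishes.
      admin.addReplicationPeer("2", peerConfig).get();
    }
  }
}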
2024-11-21T00:18:10,019 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:461) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:10,020 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,020 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,020 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
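With both peers registered, listing them is a quick way to confirm what the master now holds; a short sketch (the standalone main and the printed fields are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListPeersSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After the two AddPeerProcedures above, peers 1 and 2 should both report enabled=true.
      for (ReplicationPeerDescription peer : admin.listReplicationPeers()) {
        System.out.println(peer.getPeerId() + " enabled=" + peer.isEnabled()
            + " clusterKey=" + peer.getPeerConfig().getClusterKey());
      }
    }
  }
}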
2024-11-21T00:18:10,020 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:10,022 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42433,1732148275776 2024-11-21T00:18:10,022 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2eaee9d6 2024-11-21T00:18:10,024 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:10,026 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:10,027 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:18:10,030 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=2 2024-11-21T00:18:10,031 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:18:10,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=2 2024-11-21T00:18:10,038 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4014): Client=jenkins//172.17.0.2 update replication peer config, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:18:10,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure 2024-11-21T00:18:10,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-21T00:18:10,046 DEBUG [PEWorker-2 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36249' 2024-11-21T00:18:10,046 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@83c17a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:10,047 DEBUG [PEWorker-2 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:10,047 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:10,048 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:10,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:10,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): 
Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:10,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6453c4d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:10,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:10,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:10,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,050 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41594, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:10,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40c3c4cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:10,051 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:10,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:10,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4a305688 2024-11-21T00:18:10,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:10,054 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41606, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:10,055 INFO [PEWorker-2 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-2. 
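Before a new cluster key is accepted, the master briefly connects to the target cluster, as the PEWorker/ClusterIdFetcher records above show: RPC bootstrap servers, cluster id, then a master stub, then the connection is closed again. The sketch below shows, under stated assumptions, how a client can be pointed at a cluster through the same RPC connection registry; the property names and the RpcConnectionRegistry class name are given to the best of my knowledge and should be treated as assumptions, and the host and port are placeholders.

```java
// Sketch only, with assumed configuration keys: point a client at a cluster via the
// RPC connection registry (no ZooKeeper quorum), mirroring the
// "connect to hbase cluster with rpc bootstrap servers='...'" record above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RpcRegistrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.client.registry.impl",                      // assumed property name
        "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
    conf.set("hbase.client.bootstrap.servers",                  // assumed property name
        "peer-host.example.com:16000");                         // placeholder host:port
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Fetching cluster metrics exercises the registry -> cluster id -> master stub
      // path that the ClusterIdFetcher / ConnectionUtils records above walk through.
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}
```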
2024-11-21T00:18:10,055 DEBUG [PEWorker-2 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preUpdatePeerConfig(ReplicationPeerManager.java:234) at org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure.prePeerModification(UpdatePeerConfigProcedure.java:176) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:18:10,055 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,055 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,055 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:18:10,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:18:10,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-21T00:18:10,197 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:10,199 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:10,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44897 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=12 2024-11-21T00:18:10,223 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=12}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=UPDATE_PEER_CONFIG 2024-11-21T00:18:10,225 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-21T00:18:10,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-21T00:18:10,229 INFO [PEWorker-3 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for UPDATE_CONFIG on 5ed4808ef0e6,44897,1732148276946 suceeded 2024-11-21T00:18:10,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-21T00:18:10,233 INFO [PEWorker-5 {}] replication.UpdatePeerConfigProcedure(197): Successfully updated peer config of 1 to clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:18:10,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 161 msec 2024-11-21T00:18:10,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure in 195 msec 2024-11-21T00:18:10,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-21T00:18:10,358 INFO [RPCClient-NioEventLoopGroup-4-13 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: UPDATE_REPLICATION_PEER_CONFIG, peerId: 1 completed 2024-11-21T00:18:10,359 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4014): Client=jenkins//172.17.0.2 update replication peer config, id=2, config=clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:18:10,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure 2024-11-21T00:18:10,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-21T00:18:10,364 DEBUG [PEWorker-1 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36249' 2024-11-21T00:18:10,365 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73dfaef2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:10,365 DEBUG [PEWorker-1 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:10,366 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:10,367 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:10,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:10,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:10,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@716944e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:10,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:10,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:10,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,369 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41614, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:10,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f937095, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:10,372 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching 
master stub from registry 2024-11-21T00:18:10,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:10,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@613ca5f9 2024-11-21T00:18:10,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:10,375 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41628, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:10,376 INFO [PEWorker-1 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-1. 2024-11-21T00:18:10,376 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preUpdatePeerConfig(ReplicationPeerManager.java:234) at org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure.prePeerModification(UpdatePeerConfigProcedure.java:176) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:18:10,376 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,376 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,376 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:18:10,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:18:10,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-21T00:18:10,508 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:10,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44897 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=14 2024-11-21T00:18:10,538 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=14}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=2, type=UPDATE_PEER_CONFIG 2024-11-21T00:18:10,540 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-21T00:18:10,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-21T00:18:10,545 INFO [PEWorker-2 {}] replication.RefreshPeerProcedure(132): Refresh peer 2 for UPDATE_CONFIG on 5ed4808ef0e6,44897,1732148276946 suceeded 2024-11-21T00:18:10,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-21T00:18:10,550 INFO [PEWorker-3 {}] replication.UpdatePeerConfigProcedure(197): Successfully updated peer config of 2 to clusterKey=hbase+rpc://5ed4808ef0e6:36249,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:18:10,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 161 msec 2024-11-21T00:18:10,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure in 191 msec 2024-11-21T00:18:10,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-21T00:18:10,679 INFO [RPCClient-NioEventLoopGroup-4-13 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: UPDATE_REPLICATION_PEER_CONFIG, peerId: 2 completed 2024-11-21T00:18:10,680 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:18:10,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42433 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=2 2024-11-21T00:18:10,682 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
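Procedures pid=11 and pid=13 above are UpdatePeerConfigProcedure runs that point peers 1 and 2 at clusterKey=hbase+rpc://5ed4808ef0e6:36249; each one fans out a RefreshPeerProcedure to the region server before the client re-reads the stored config. A hedged sketch of the equivalent Admin calls follows; it is not the test's code, and the cluster key is copied from the log only as an example value.

```java
// Sketch of the client-side equivalent of the UPDATE_REPLICATION_PEER_CONFIG and
// get-replication-peer-config calls logged above (not the actual test code).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class UpdatePeerConfigSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (String peerId : new String[] { "1", "2" }) {
        // Start from the current config and change only the cluster key.
        ReplicationPeerConfig current = admin.getReplicationPeerConfig(peerId);
        ReplicationPeerConfig updated = ReplicationPeerConfig.newBuilder(current)
            .setClusterKey("hbase+rpc://5ed4808ef0e6:36249") // example value from the log
            .build();
        // Returns once the update procedure and its per-regionserver refresh have finished.
        admin.updateReplicationPeerConfig(peerId, updated);
        // Read it back, as the client does with the get-replication-peer-config calls above.
        System.out.println(peerId + " -> " + admin.getReplicationPeerConfig(peerId).getClusterKey());
      }
    }
  }
}
```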
2024-11-21T00:18:10,682 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:495) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:10,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,682 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
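The call stack above is the test tearing down its mini cluster: HBaseTestingUtil.closeConnection and cleanup run first, then shutdownMiniHBaseCluster stops the master and region server, which produces the shutdown records that follow. A rough sketch of that teardown pattern is given below; the scaffolding is illustrative only and is not the actual test body.

```java
// Illustrative teardown pattern following the HBaseTestingUtil calls in the stack
// trace above; the test body is omitted and this class is not the real test.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();   // starts DFS, ZooKeeper, one master and one region server
    try {
      // ... exercise replication peers against the mini cluster here ...
    } finally {
      // Closes the cached connection and stops the HBase cluster, matching the
      // closeConnection -> cleanup -> shutdownMiniHBaseCluster frames above.
      util.shutdownMiniHBaseCluster();
      // Also stops the DFS and ZooKeeper mini clusters.
      util.shutdownMiniCluster();
    }
  }
}
```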
2024-11-21T00:18:10,682 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:18:10,683 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1673203762, stopped=false 2024-11-21T00:18:10,683 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,42433,1732148275776 2024-11-21T00:18:10,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/running 2024-11-21T00:18:10,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/running 2024-11-21T00:18:10,699 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:10,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:10,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:10,699 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:18:10,699 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:495) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at 
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:10,699 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,699 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:18:10,699 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:18:10,700 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,44897,1732148276946' ***** 2024-11-21T00:18:10,700 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:18:10,701 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:18:10,701 INFO [RS:0;5ed4808ef0e6:44897 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:18:10,701 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:18:10,701 INFO [RS:0;5ed4808ef0e6:44897 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:18:10,701 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(3091): Received CLOSE for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:10,702 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,44897,1732148276946 2024-11-21T00:18:10,702 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:10,702 INFO [RS:0;5ed4808ef0e6:44897 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:44897. 
2024-11-21T00:18:10,702 DEBUG [RS:0;5ed4808ef0e6:44897 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:10,702 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:10,702 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 16c2eddea6b943d19f79621ce6daf354, disabling compactions & flushes 2024-11-21T00:18:10,702 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:10,702 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:18:10,702 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:18:10,702 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:10,702 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:18:10,702 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. after waiting 0 ms 2024-11-21T00:18:10,702 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 
2024-11-21T00:18:10,702 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:18:10,703 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:18:10,703 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:10,703 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 16c2eddea6b943d19f79621ce6daf354=hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.} 2024-11-21T00:18:10,703 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:10,703 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:10,703 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:10,703 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:10,703 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:10,706 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-21T00:18:10,706 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 16c2eddea6b943d19f79621ce6daf354 3/3 column families, dataSize=294 B heapSize=1.19 KB 2024-11-21T00:18:10,710 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:10,747 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:10,777 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/info/a165e616f35d4d459698751ae28b0e0d is 147, key is hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354./info:regioninfo/1732148288842/Put/seqid=0 2024-11-21T00:18:10,777 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/.tmp/queue/df2dd4c028384048ac6ac8a934b1bb66 is 151, key is 1-5ed4808ef0e6,44897,1732148276946/queue:5ed4808ef0e6%2C44897%2C1732148276946/1732148289175/Put/seqid=0 2024-11-21T00:18:10,794 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741838_1014 (size=5500) 2024-11-21T00:18:10,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741839_1015 (size=6631) 2024-11-21T00:18:10,904 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:10,917 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:11,104 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:11,195 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=294 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/.tmp/queue/df2dd4c028384048ac6ac8a934b1bb66 2024-11-21T00:18:11,195 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.17 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/info/a165e616f35d4d459698751ae28b0e0d 2024-11-21T00:18:11,242 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/.tmp/queue/df2dd4c028384048ac6ac8a934b1bb66 as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/queue/df2dd4c028384048ac6ac8a934b1bb66 2024-11-21T00:18:11,254 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/queue/df2dd4c028384048ac6ac8a934b1bb66, entries=2, sequenceid=6, filesize=5.4 K 2024-11-21T00:18:11,259 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/ns/8584763792984ca5b1fc3b4e98482bed is 43, key is default/ns:d/1732148280504/Put/seqid=0 2024-11-21T00:18:11,263 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~294 B/294, heapSize ~688 B/688, currentSize=0 B/0 for 16c2eddea6b943d19f79621ce6daf354 in 554ms, sequenceid=6, compaction requested=false 2024-11-21T00:18:11,263 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:18:11,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741840_1016 (size=5153) 2024-11-21T00:18:11,274 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T00:18:11,276 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:11,276 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:11,276 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:11,277 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 16c2eddea6b943d19f79621ce6daf354: Waiting for close lock at 1732148290702Running coprocessor pre-close hooks at 1732148290702Disabling compacts and flushes for region at 1732148290702Disabling writes for close at 1732148290702Obtaining lock to block concurrent updates at 1732148290706 (+4 ms)Preparing flush snapshotting stores in 16c2eddea6b943d19f79621ce6daf354 at 1732148290706Finished memstore snapshotting hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354., syncing WAL and waiting on mvcc, flushsize=dataSize=294, getHeapSize=1168, getOffHeapSize=0, getCellsCount=2 at 1732148290714 (+8 ms)Flushing stores of hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. at 1732148290716 (+2 ms)Flushing 16c2eddea6b943d19f79621ce6daf354/queue: creating writer at 1732148290718 (+2 ms)Flushing 16c2eddea6b943d19f79621ce6daf354/queue: appending metadata at 1732148290760 (+42 ms)Flushing 16c2eddea6b943d19f79621ce6daf354/queue: closing flushed file at 1732148290763 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e939d25: reopening flushed file at 1732148291240 (+477 ms)Finished flush of dataSize ~294 B/294, heapSize ~688 B/688, currentSize=0 B/0 for 16c2eddea6b943d19f79621ce6daf354 in 554ms, sequenceid=6, compaction requested=false at 1732148291263 (+23 ms)Writing region close event to WAL at 1732148291268 (+5 ms)Running coprocessor post-close hooks at 1732148291274 (+6 ms)Closed at 1732148291276 (+2 ms) 2024-11-21T00:18:11,277 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 
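Closing hbase:replication above first flushes its pending memstore edits (294 B in the queue family) to a new HFile under .tmp, commits the file into the queue directory, writes a recovered.edits seqid marker, and only then runs the post-close hooks. The same memstore-to-HFile flush can also be requested explicitly through the Admin API; the snippet below is illustrative only and is not how the close path itself is coded.

```java
// Illustrative only: an explicit flush request drives the same memstore -> HFile path
// that the close sequence above performs implicitly (DefaultStoreFlusher writes under
// .tmp, then the file is committed into the column family directory).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush every region of hbase:replication.
      admin.flush(TableName.valueOf("hbase", "replication"));
    }
  }
}
```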
2024-11-21T00:18:11,304 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:11,320 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:11,424 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:11,505 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:11,505 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:18:11,505 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:18:11,669 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/ns/8584763792984ca5b1fc3b4e98482bed 2024-11-21T00:18:11,699 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/table/870975dcc3ea4a9a82bc67ddb44e064e is 53, key is hbase:replication/table:state/1732148288856/Put/seqid=0 2024-11-21T00:18:11,705 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:18:11,705 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:18:11,705 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:11,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741841_1017 (size=5256) 2024-11-21T00:18:11,905 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:12,030 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:12,031 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946/5ed4808ef0e6%2C44897%2C1732148276946.1732148279743 to pos 0, reset compression=false 2024-11-21T00:18:12,106 DEBUG [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:12,109 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=98 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/table/870975dcc3ea4a9a82bc67ddb44e064e 2024-11-21T00:18:12,121 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/info/a165e616f35d4d459698751ae28b0e0d as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/info/a165e616f35d4d459698751ae28b0e0d 2024-11-21T00:18:12,131 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/info/a165e616f35d4d459698751ae28b0e0d, entries=10, sequenceid=11, filesize=6.5 K 2024-11-21T00:18:12,133 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/ns/8584763792984ca5b1fc3b4e98482bed as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/ns/8584763792984ca5b1fc3b4e98482bed 2024-11-21T00:18:12,145 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/ns/8584763792984ca5b1fc3b4e98482bed, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:18:12,147 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/table/870975dcc3ea4a9a82bc67ddb44e064e as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/table/870975dcc3ea4a9a82bc67ddb44e064e 2024-11-21T00:18:12,157 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/table/870975dcc3ea4a9a82bc67ddb44e064e, entries=2, sequenceid=11, filesize=5.1 K 2024-11-21T00:18:12,159 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1368, heapSize ~3.08 KB/3152, currentSize=0 
B/0 for 1588230740 in 1456ms, sequenceid=11, compaction requested=false 2024-11-21T00:18:12,170 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T00:18:12,171 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:12,171 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:12,171 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:12,171 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148290703Running coprocessor pre-close hooks at 1732148290703Disabling compacts and flushes for region at 1732148290703Disabling writes for close at 1732148290703Obtaining lock to block concurrent updates at 1732148290706 (+3 ms)Preparing flush snapshotting stores in 1588230740 at 1732148290706Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1368, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732148290714 (+8 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148290715 (+1 ms)Flushing 1588230740/info: creating writer at 1732148290718 (+3 ms)Flushing 1588230740/info: appending metadata at 1732148290761 (+43 ms)Flushing 1588230740/info: closing flushed file at 1732148290763 (+2 ms)Flushing 1588230740/ns: creating writer at 1732148291240 (+477 ms)Flushing 1588230740/ns: appending metadata at 1732148291258 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732148291258Flushing 1588230740/table: creating writer at 1732148291681 (+423 ms)Flushing 1588230740/table: appending metadata at 1732148291698 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732148291698Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@519bfbd8: reopening flushed file at 1732148292120 (+422 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68ce885a: reopening flushed file at 1732148292132 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28b6bf98: reopening flushed file at 1732148292145 (+13 ms)Finished flush of dataSize ~1.34 KB/1368, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 1456ms, sequenceid=11, compaction requested=false at 1732148292159 (+14 ms)Writing region close event to WAL at 1732148292161 (+2 ms)Running coprocessor post-close hooks at 1732148292171 (+10 ms)Closed at 1732148292171 2024-11-21T00:18:12,171 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:12,306 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,44897,1732148276946; all regions closed. 
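After the last region closes, each WAL provider archives its rolled files ("Moved 1 WAL file(s) to .../oldWALs") and closes. For completeness, a small hypothetical HDFS listing of that archive directory is sketched below; the NameNode address and test-data path are copied from the log purely as example values.

```java
// Hypothetical check (not part of the test): list the oldWALs archive directory that
// the AbstractFSWAL records above refer to. Addresses and paths are example values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OldWalsListing {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:41721"); // NameNode address seen in the log
    try (FileSystem fs = FileSystem.get(conf)) {
      Path oldWals =
          new Path("/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs");
      for (FileStatus s : fs.listStatus(oldWals)) {
        System.out.println(s.getPath().getName() + " (" + s.getLen() + " bytes)");
      }
    }
  }
}
```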
2024-11-21T00:18:12,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741834_1010 (size=2742) 2024-11-21T00:18:12,317 DEBUG [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs 2024-11-21T00:18:12,317 INFO [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C44897%2C1732148276946.meta:.meta(num 1732148280137) 2024-11-21T00:18:12,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741837_1013 (size=1802) 2024-11-21T00:18:12,324 DEBUG [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs 2024-11-21T00:18:12,324 INFO [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C44897%2C1732148276946.rep:(num 1732148288792) 2024-11-21T00:18:12,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741833_1009 (size=93) 2024-11-21T00:18:12,330 DEBUG [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs 2024-11-21T00:18:12,330 INFO [RS:0;5ed4808ef0e6:44897 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C44897%2C1732148276946:(num 1732148279743) 2024-11-21T00:18:12,330 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:12,330 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:12,330 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:12,331 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:12,331 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:12,331 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:18:12,331 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,44897,1732148276946 because: Region server is closing 2024-11-21T00:18:12,332 INFO [RS:0;5ed4808ef0e6:44897 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:44897. 
2024-11-21T00:18:12,332 DEBUG [RS:0;5ed4808ef0e6:44897 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:12,332 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:12,332 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:12,332 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:12,432 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:18:12,432 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.shipper5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 2024-11-21T00:18:12,433 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44897,1732148276946.replicationSource.shipper5ed4808ef0e6%2C44897%2C1732148276946,1-5ed4808ef0e6,44897,1732148276946 terminated 2024-11-21T00:18:12,434 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.ReplicationSource(687): peerId=2, Closing source 2-5ed4808ef0e6,44897,1732148276946 because: Region server is closing 2024-11-21T00:18:12,434 INFO [RS:0;5ed4808ef0e6:44897 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:44897. 2024-11-21T00:18:12,434 DEBUG [RS:0;5ed4808ef0e6:44897 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:12,435 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:12,435 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:12,435 DEBUG [RS:0;5ed4808ef0e6:44897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:12,536 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.wal-reader.5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:18:12,536 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.shipper5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 
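The two DEBUG "Call stack:" dumps above are not errors: AsyncConnectionImpl records where close() was invoked from so that an unexpected connection shutdown can be traced back to its caller, here ReplicationSourceManager.join() during region server stop. A generic version of that debugging trick, with a hypothetical class standing in for the connection, looks like this:

```java
import java.util.Arrays;
import java.util.stream.Collectors;

/** Generic sketch of the "log who closed me" trick behind the "Call stack:" entries above. */
public class CloseSiteLogger implements AutoCloseable {
    @Override
    public void close() {
        // Capture and print the caller's stack, the same information the DEBUG entries record.
        String stack = Arrays.stream(Thread.currentThread().getStackTrace())
                .skip(2) // drop the getStackTrace() and close() frames themselves
                .map(frame -> "  at " + frame)
                .collect(Collectors.joining(System.lineSeparator()));
        System.out.println("Connection has been closed. Call stack:"
                + System.lineSeparator() + stack);
    }

    public static void main(String[] args) {
        try (CloseSiteLogger conn = new CloseSiteLogger()) {
            // work with the "connection" here; close() logs its call site on exit
        }
    }
}
```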
2024-11-21T00:18:12,536 INFO [RS:0;5ed4808ef0e6:44897 {}] regionserver.ReplicationSource(739): peerId=2, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,44897,1732148276946.replicationSource.shipper5ed4808ef0e6%2C44897%2C1732148276946,2-5ed4808ef0e6,44897,1732148276946 terminated 2024-11-21T00:18:12,537 INFO [RS:0;5ed4808ef0e6:44897 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44897 2024-11-21T00:18:12,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/rs/5ed4808ef0e6,44897,1732148276946 2024-11-21T00:18:12,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/rs 2024-11-21T00:18:12,551 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:12,562 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,44897,1732148276946] 2024-11-21T00:18:12,572 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/draining/5ed4808ef0e6,44897,1732148276946 already deleted, retry=false 2024-11-21T00:18:12,572 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,44897,1732148276946 expired; onlineServers=0 2024-11-21T00:18:12,573 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,42433,1732148275776' ***** 2024-11-21T00:18:12,573 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:18:12,573 INFO [M:0;5ed4808ef0e6:42433 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:12,573 INFO [M:0;5ed4808ef0e6:42433 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:12,573 DEBUG [M:0;5ed4808ef0e6:42433 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:18:12,573 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
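The sequence above is the master noticing the region server's death through ZooKeeper: the server's znode under /0857133414/rs is ephemeral, so ending its session raises a NodeDeleted event, RegionServerTracker processes the expiration, and with onlineServers=0 and cluster shutdown set the master stops itself. A bare-bones sketch of that watch pattern using the plain ZooKeeper client follows; the quorum address and znode path are copied from the log, while everything else (class name, timeouts, the single one-shot watch) is illustrative and far simpler than HBase's ZKWatcher/RegionServerTracker.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/**
 * Bare-bones illustration (not HBase code) of the notification the entries above rely on:
 * an ephemeral znode disappears when its owner's session ends, and a registered watcher
 * receives NodeDeleted.
 */
public class RsNodeWatcher {
    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:50128";                                  // from the log
        String rsZnode = "/0857133414/rs/5ed4808ef0e6,44897,1732148276946"; // from the log

        Watcher watcher = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && rsZnode.equals(event.getPath())) {
                System.out.println("RegionServer ephemeral node deleted, processing expiration");
            }
        };

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
        zk.exists(rsZnode, watcher); // registers a one-shot watch; real code re-arms it after each event
        Thread.sleep(60_000);        // keep this demo process alive long enough to observe the event
        zk.close();
    }
}
```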
2024-11-21T00:18:12,573 DEBUG [M:0;5ed4808ef0e6:42433 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:18:12,573 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148279165 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148279165,5,FailOnTimeoutGroup] 2024-11-21T00:18:12,573 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148279163 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148279163,5,FailOnTimeoutGroup] 2024-11-21T00:18:12,573 INFO [M:0;5ed4808ef0e6:42433 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:12,574 INFO [M:0;5ed4808ef0e6:42433 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:12,574 DEBUG [M:0;5ed4808ef0e6:42433 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:18:12,574 INFO [M:0;5ed4808ef0e6:42433 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:18:12,574 INFO [M:0;5ed4808ef0e6:42433 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:12,575 INFO [M:0;5ed4808ef0e6:42433 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:18:12,575 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:18:12,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/master 2024-11-21T00:18:12,608 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:12,609 DEBUG [M:0;5ed4808ef0e6:42433 {}] zookeeper.ZKUtil(347): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/master because node does not exist (not an error) 2024-11-21T00:18:12,609 WARN [M:0;5ed4808ef0e6:42433 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:18:12,610 INFO [M:0;5ed4808ef0e6:42433 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/.lastflushedseqids 2024-11-21T00:18:12,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741842_1018 (size=181) 2024-11-21T00:18:12,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:12,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44897-0x1015ac108c20001, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:12,663 INFO [RS:0;5ed4808ef0e6:44897 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:12,663 INFO 
[RS:0;5ed4808ef0e6:44897 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,44897,1732148276946; zookeeper connection closed. 2024-11-21T00:18:12,664 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@310c4045 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@310c4045 2024-11-21T00:18:12,665 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:18:12,821 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:13,036 INFO [M:0;5ed4808ef0e6:42433 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:18:13,037 INFO [M:0;5ed4808ef0e6:42433 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:18:13,037 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:13,037 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:13,037 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:13,037 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:13,037 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:18:13,038 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=50.24 KB heapSize=61.14 KB 2024-11-21T00:18:13,061 DEBUG [M:0;5ed4808ef0e6:42433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04c3647d9a50453da21906b14bc01c5b is 82, key is hbase:meta,,1/info:regioninfo/1732148280337/Put/seqid=0 2024-11-21T00:18:13,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741843_1019 (size=5672) 2024-11-21T00:18:13,468 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04c3647d9a50453da21906b14bc01c5b 2024-11-21T00:18:13,521 DEBUG [M:0;5ed4808ef0e6:42433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccfb8f1021614970b7490b1d65816dae is 1478, key is \x00\x00\x00\x00\x00\x00\x00\x05/proc:d/1732148288863/Put/seqid=0 2024-11-21T00:18:13,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741844_1020 (size=8663) 2024-11-21T00:18:13,536 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.68 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccfb8f1021614970b7490b1d65816dae 2024-11-21T00:18:13,586 DEBUG [M:0;5ed4808ef0e6:42433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cc881a092854a4c9ebb6e292e5420f3 is 69, key is 5ed4808ef0e6,44897,1732148276946/rs:state/1732148279288/Put/seqid=0 2024-11-21T00:18:13,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741845_1021 (size=5156) 2024-11-21T00:18:13,994 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cc881a092854a4c9ebb6e292e5420f3 2024-11-21T00:18:14,015 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/04c3647d9a50453da21906b14bc01c5b as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/04c3647d9a50453da21906b14bc01c5b 2024-11-21T00:18:14,030 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/04c3647d9a50453da21906b14bc01c5b, entries=8, sequenceid=115, filesize=5.5 K 2024-11-21T00:18:14,034 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ccfb8f1021614970b7490b1d65816dae as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ccfb8f1021614970b7490b1d65816dae 2024-11-21T00:18:14,046 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ccfb8f1021614970b7490b1d65816dae, entries=14, sequenceid=115, filesize=8.5 K 2024-11-21T00:18:14,048 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9cc881a092854a4c9ebb6e292e5420f3 as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9cc881a092854a4c9ebb6e292e5420f3 2024-11-21T00:18:14,057 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9cc881a092854a4c9ebb6e292e5420f3, entries=1, sequenceid=115, filesize=5.0 K 2024-11-21T00:18:14,059 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(3140): Finished flush of dataSize ~50.24 KB/51442, heapSize ~60.84 KB/62304, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1022ms, sequenceid=115, compaction requested=false 2024-11-21T00:18:14,063 INFO [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:14,064 DEBUG [M:0;5ed4808ef0e6:42433 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148293037Disabling compacts and flushes for region at 1732148293037Disabling writes for close at 1732148293037Obtaining lock to block concurrent updates at 1732148293038 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148293038Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=51442, getHeapSize=62544, getOffHeapSize=0, getCellsCount=134 at 1732148293038Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148293040 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148293040Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148293061 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148293061Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148293494 (+433 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148293520 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148293521 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148293553 (+32 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148293585 (+32 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148293585Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63b0f76d: reopening flushed file at 1732148294012 (+427 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@38415bf: reopening flushed file at 1732148294030 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@799fe815: reopening flushed file at 1732148294046 (+16 ms)Finished flush of dataSize ~50.24 KB/51442, heapSize ~60.84 KB/62304, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1022ms, sequenceid=115, compaction requested=false at 1732148294059 (+13 ms)Writing region close event to WAL at 1732148294063 (+4 ms)Closed at 1732148294063 2024-11-21T00:18:14,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741830_1006 (size=59421) 2024-11-21T00:18:14,068 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 not finished, retry = 0 2024-11-21T00:18:14,169 INFO [M:0;5ed4808ef0e6:42433 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:18:14,169 INFO [M:0;5ed4808ef0e6:42433 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42433 2024-11-21T00:18:14,170 INFO [M:0;5ed4808ef0e6:42433 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:14,170 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:18:14,315 INFO [M:0;5ed4808ef0e6:42433 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:14,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:14,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42433-0x1015ac108c20000, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:14,318 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:14,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:14,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:14,318 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:14,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:14,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:14,319 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:18:14,319 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:14,320 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34235 2024-11-21T00:18:14,322 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34235 connecting to ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:18:14,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:342350x0, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:14,331 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34235-0x1015ac108c20006 connected 2024-11-21T00:18:14,347 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:14,351 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:14,360 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:18:14,360 
DEBUG [pool-237-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: INIT 2024-11-21T00:18:14,360 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b, hbase.cluster.distributed=false 2024-11-21T00:18:14,363 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/acl 2024-11-21T00:18:14,364 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34235 2024-11-21T00:18:14,365 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34235 2024-11-21T00:18:14,365 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34235 2024-11-21T00:18:14,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34235 2024-11-21T00:18:14,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34235 2024-11-21T00:18:14,394 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:14,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:14,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:14,394 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:14,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:14,395 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:14,395 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:18:14,395 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:14,396 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42819 2024-11-21T00:18:14,400 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42819 connecting to ZooKeeper ensemble=127.0.0.1:50128 2024-11-21T00:18:14,402 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:14,407 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:14,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428190x0, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:14,451 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428190x0, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:18:14,452 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:18:14,453 DEBUG [pool-242-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: INIT 2024-11-21T00:18:14,456 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42819-0x1015ac108c20007 connected 2024-11-21T00:18:14,463 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:18:14,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/master 2024-11-21T00:18:14,467 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/acl 2024-11-21T00:18:14,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42819 2024-11-21T00:18:14,474 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42819 2024-11-21T00:18:14,485 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42819 2024-11-21T00:18:14,486 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42819 2024-11-21T00:18:14,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42819 2024-11-21T00:18:14,505 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:34235 2024-11-21T00:18:14,506 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0857133414/backup-masters/5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:14,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:18:14,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:18:14,533 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, 
baseZNode=/0857133414 Set watcher on existing znode=/0857133414/backup-masters/5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:14,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:14,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0857133414/master 2024-11-21T00:18:14,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:14,542 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on existing znode=/0857133414/master 2024-11-21T00:18:14,543 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0857133414/backup-masters/5ed4808ef0e6,34235,1732148294317 from backup master directory 2024-11-21T00:18:14,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:18:14,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/backup-masters/5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:14,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/backup-masters 2024-11-21T00:18:14,552 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:18:14,552 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:14,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:14,589 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:18:14,611 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=2) cost 22ms. 
2024-11-21T00:18:14,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741846_1022 (size=196) 2024-11-21T00:18:14,689 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:14,691 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:18:14,691 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:14,703 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(342): old store file tracker DEFAULT is the same with new store file tracker, skip migration 2024-11-21T00:18:14,709 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(316): Renamed hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776 to hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776-dead as it is dead 2024-11-21T00:18:14,711 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-21T00:18:14,711 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-21T00:18:14,711 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776-dead/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 2024-11-21T00:18:14,724 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776-dead/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 after 5ms 2024-11-21T00:18:14,735 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(328): Renamed hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776-dead/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 to hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 2024-11-21T00:18:14,735 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(330): Delete empty local region wal dir hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,42433,1732148275776-dead 2024-11-21T00:18:14,736 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:14,738 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:14,740 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C34235%2C1732148294317, suffix=, logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,34235,1732148294317, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/oldWALs, maxLogs=10 2024-11-21T00:18:14,755 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,34235,1732148294317/5ed4808ef0e6%2C34235%2C1732148294317.1732148294740, exclude list is [], retry=0 2024-11-21T00:18:14,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:18:14,772 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/WALs/5ed4808ef0e6,34235,1732148294317/5ed4808ef0e6%2C34235%2C1732148294317.1732148294740 2024-11-21T00:18:14,772 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:18:14,772 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:14,773 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:14,773 DEBUG 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,773 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,775 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,777 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:18:14,777 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:14,789 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/04c3647d9a50453da21906b14bc01c5b 2024-11-21T00:18:14,789 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:14,790 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,791 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:18:14,791 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:14,801 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ccfb8f1021614970b7490b1d65816dae 2024-11-21T00:18:14,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:14,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,804 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:18:14,804 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:14,822 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9cc881a092854a4c9ebb6e292e5420f3 2024-11-21T00:18:14,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:14,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:18:14,824 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:14,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:14,825 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,826 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5516): Found 1 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals 2024-11-21T00:18:14,827 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 2024-11-21T00:18:14,857 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5793): Applied 0, skipped 136, firstSequenceIdInLog=3, maxSequenceIdInLog=117, path=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 2024-11-21T00:18:14,859 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C42433%2C1732148275776.1732148278085 2024-11-21T00:18:14,861 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,861 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
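The FlushLargeStoresPolicy message above documents a fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the master:store descriptor, the per-family lower bound becomes the region's memstore flush size divided by the number of column families. With the flushSize=134217728 configured earlier and the four families (info, proc, rs, state), that works out to the 32 MB the log reports and the flushSizeLowerBound=33554432 that appears in the region-open journal just below. A tiny check of that arithmetic (constant names are descriptive only, not HBase's):

```java
/** Arithmetic behind the FlushLargeStoresPolicy fallback logged above. */
public class PerFamilyFlushBound {
    public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L; // "flushSize=134217728" from the MasterRegion flusher config
        int columnFamilies = 4;                // master:store has info, proc, rs and state
        long lowerBound = memstoreFlushSize / columnFamilies;
        // Prints 33554432 (32 MB), matching "FlushLargeStoresPolicy{flushSizeLowerBound=33554432}"
        // in the region-open journal that follows.
        System.out.println(lowerBound);
    }
}
```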
2024-11-21T00:18:14,865 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:14,877 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/117.seqid, newMaxSeqId=117, maxSeqId=1 2024-11-21T00:18:14,879 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=118; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65185515, jitterRate=-0.028660133481025696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:14,879 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148294773Initializing all the Stores at 1732148294775 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148294775Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148294775Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148294775Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148294775Cleaning up temporary data from old regions at 1732148294861 (+86 ms)Region opened successfully at 1732148294879 (+18 ms) 2024-11-21T00:18:14,882 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:18:14,888 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f919812, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:14,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(884): The info family in master local region already has data in it, skip migrating... 
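The region open journal above prints the column-family descriptors of the master local store, including the 'info' family (VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8 KB). A minimal sketch of assembling an equivalent family descriptor with the standard builder API; the wrapping class is illustrative:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class InfoFamilyDescriptor {
    // Mirrors the 'info' family attributes printed in the open journal above.
    static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build();
    }
}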
2024-11-21T00:18:14,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:18:14,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:18:14,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:18:14,901 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:18:14,907 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:14,907 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:18:14,908 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=5, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:18:14,908 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:18:14,908 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure 2024-11-21T00:18:14,909 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure 2024-11-21T00:18:14,909 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 8 msec 2024-11-21T00:18:14,909 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:18:14,914 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=1588230740, regionState=OPEN, lastHost=5ed4808ef0e6,44897,1732148276946, regionLocation=5ed4808ef0e6,44897,1732148276946, openSeqNum=2 2024-11-21T00:18:14,914 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(349): Loaded hbase:meta state=OPEN, location=5ed4808ef0e6,44897,1732148276946, table=hbase:meta, region=1588230740 2024-11-21T00:18:14,914 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,44897,1732148276946, state=OPEN 2024-11-21T00:18:15,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0857133414/meta-region-server 2024-11-21T00:18:15,130 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/0857133414/meta-region-server 2024-11-21T00:18:15,130 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:15,130 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:15,135 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 1 possibly 'live' servers, and 0 'splitting'. 2024-11-21T00:18:15,140 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/draining/5ed4808ef0e6,44897,1732148276946 already deleted, retry=false 2024-11-21T00:18:15,140 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(695): Processing expiration of 5ed4808ef0e6,44897,1732148276946 on 5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:15,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 5ed4808ef0e6,44897,1732148276946, splitWal=true, meta=true 2024-11-21T00:18:15,148 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1991): Scheduled ServerCrashProcedure pid=15 for 5ed4808ef0e6,44897,1732148276946 (carryingMeta=true) 5ed4808ef0e6,44897,1732148276946/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@572be2dd[Write locks = 1, Read locks = 0], oldState=ONLINE. 2024-11-21T00:18:15,152 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/balancer because node does not exist (not necessarily an error) 2024-11-21T00:18:15,161 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/balancer already deleted, retry=false 2024-11-21T00:18:15,162 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:18:15,163 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:18:15,205 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/normalizer already deleted, retry=false 2024-11-21T00:18:15,206 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:18:15,207 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:18:15,224 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/switch/split already deleted, retry=false 2024-11-21T00:18:15,226 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data 
of znode /0857133414/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:18:15,235 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/switch/merge already deleted, retry=false 2024-11-21T00:18:15,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:18:15,267 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/snapshot-cleanup already deleted, retry=false 2024-11-21T00:18:15,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0857133414/running 2024-11-21T00:18:15,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:15,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0857133414/running 2024-11-21T00:18:15,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:15,278 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,34235,1732148294317, sessionid=0x1015ac108c20006, setting cluster-up flag (Was=false) 2024-11-21T00:18:15,289 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0857133414/flush-table-proc/acquired, /0857133414/flush-table-proc/reached, /0857133414/flush-table-proc/abort 2024-11-21T00:18:15,290 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:15,299 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0857133414/online-snapshot/acquired, /0857133414/online-snapshot/reached, /0857133414/online-snapshot/abort 2024-11-21T00:18:15,300 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:15,302 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1190): begin to load .lastflushedseqids at hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/.lastflushedseqids 2024-11-21T00:18:15,305 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:18:15,306 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, 
RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:18:15,306 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,34235,1732148294317 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 1 5ed4808ef0e6,44897,1732148276946 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:15,308 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,309 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148325309 2024-11-21T00:18:15,309 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:18:15,309 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:18:15,309 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:18:15,309 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:18:15,309 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:18:15,309 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:18:15,310 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,310 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:18:15,310 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:18:15,310 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:18:15,310 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:18:15,311 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:18:15,311 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:18:15,311 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148295311,5,FailOnTimeoutGroup] 2024-11-21T00:18:15,311 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148295311,5,FailOnTimeoutGroup] 2024-11-21T00:18:15,311 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,311 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:18:15,311 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,311 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,311 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(169): Start pid=15, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 5ed4808ef0e6,44897,1732148276946, splitWal=true, meta=true 2024-11-21T00:18:15,312 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148295312, completionTime=-1 2024-11-21T00:18:15,312 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(848): The value of 'hbase.master.wait.on.regionservers.maxtostart' (-1) is set less than 'hbase.master.wait.on.regionservers.mintostart' (1), ignoring. 
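The WARN above is emitted because 'hbase.master.wait.on.regionservers.maxtostart' (-1) is below 'hbase.master.wait.on.regionservers.mintostart' (1), so the max is ignored; the line before it notes that region reopening on high store-file reference counts stays disabled until 'hbase.regions.recovery.store.file.ref.count' is set above 0. A minimal configuration sketch using the keys quoted in the log (numeric values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MasterStartupWaitConfig {
    static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Keys copied verbatim from the log; keep max >= min so neither is ignored.
        conf.setInt("hbase.master.wait.on.regionservers.mintostart", 1);
        conf.setInt("hbase.master.wait.on.regionservers.maxtostart", 1);
        // A threshold > 0 enables reopening regions with very high storeFileRefCount.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256); // illustrative
        return conf;
    }
}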
2024-11-21T00:18:15,313 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=15, state=RUNNABLE:SERVER_CRASH_SPLIT_META_LOGS, hasLock=true; ServerCrashProcedure 5ed4808ef0e6,44897,1732148276946, splitWal=true, meta=true, isMeta: true 2024-11-21T00:18:15,315 DEBUG [PEWorker-1 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946-splitting 2024-11-21T00:18:15,316 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=0; waited=0ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=0ms 2024-11-21T00:18:15,316 INFO [PEWorker-1 {}] master.SplitLogManager(171): hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946-splitting dir is empty, no logs to split. 2024-11-21T00:18:15,317 INFO [PEWorker-1 {}] master.SplitWALManager(105): 5ed4808ef0e6,44897,1732148276946 WAL count=0, meta=true 2024-11-21T00:18:15,321 INFO [PEWorker-1 {}] master.SplitLogManager(171): hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946-splitting dir is empty, no logs to split. 2024-11-21T00:18:15,321 INFO [PEWorker-1 {}] master.SplitWALManager(105): 5ed4808ef0e6,44897,1732148276946 WAL count=0, meta=true 2024-11-21T00:18:15,322 DEBUG [PEWorker-1 {}] procedure.ServerCrashProcedure(329): Check if 5ed4808ef0e6,44897,1732148276946 WAL splitting is done? wals=0, meta=true 2024-11-21T00:18:15,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:18:15,330 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:15,332 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-21T00:18:15,390 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(746): ClusterId : e99aa0aa-6523-4535-af8b-0f39a4d084f3 2024-11-21T00:18:15,390 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:18:15,404 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:18:15,404 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:18:15,426 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:18:15,426 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46ef6f45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:15,438 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:42819 2024-11-21T00:18:15,438 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:18:15,438 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:18:15,438 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:18:15,439 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,34235,1732148294317 with port=42819, startcode=1732148294393 2024-11-21T00:18:15,439 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:18:15,441 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49131, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:18:15,442 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34235 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:15,442 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34235 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:15,444 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b 2024-11-21T00:18:15,444 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41721 2024-11-21T00:18:15,444 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:18:15,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/rs 2024-11-21T00:18:15,457 DEBUG [RS:0;5ed4808ef0e6:42819 {}] zookeeper.ZKUtil(111): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on existing znode=/0857133414/rs/5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:15,457 WARN [RS:0;5ed4808ef0e6:42819 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:18:15,457 INFO [RS:0;5ed4808ef0e6:42819 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:15,457 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:15,457 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,42819,1732148294393] 2024-11-21T00:18:15,466 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=154ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=0ms 2024-11-21T00:18:15,482 WARN [5ed4808ef0e6:34235 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-21T00:18:15,505 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:18:15,507 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:18:15,507 INFO [RS:0;5ed4808ef0e6:42819 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:18:15,507 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,507 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:18:15,508 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:18:15,509 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
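The MemStoreFlusher and PressureAwareCompactionThroughputController lines above report heap-derived limits (globalMemStoreLimit=880 M with an 836 M low-water mark) and compaction throughput bounds of 100 MB/s and 50 MB/s. A rough configuration sketch; the property names in the comments are assumptions based on common HBase settings, since the log does not quote them:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionThroughputConfig {
    static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: fraction of heap used as the global memstore limit; the
        // 880 M / 836 M figures in the log derive from this fraction and the heap size.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Assumed keys for the compaction throughput controller bounds, matching
        // the 100 MB/s upper and 50 MB/s lower figures printed above.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
    }
}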
2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,509 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:15,510 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:15,510 DEBUG [RS:0;5ed4808ef0e6:42819 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:15,514 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,514 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,514 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,514 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:15,514 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,514 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148294393-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:15,533 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:18:15,533 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148294393-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,550 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,42819,1732148294393, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:18:15,565 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(231): queueId=2-5ed4808ef0e6,42819,1732148294393, ReplicationSource: 2, currentBandwidth=0 2024-11-21T00:18:15,566 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:15,566 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.Replication(171): 5ed4808ef0e6,42819,1732148294393 started 2024-11-21T00:18:15,566 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,1-5ed4808ef0e6,42819,1732148294393 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36249' 2024-11-21T00:18:15,568 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,1-5ed4808ef0e6,42819,1732148294393 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@37a879e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:15,568 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,1-5ed4808ef0e6,42819,1732148294393 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:15,568 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,1-5ed4808ef0e6,42819,1732148294393 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:15,570 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:15,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:15,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:15,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@65b17084, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:15,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:15,571 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:15,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:15,573 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33746, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:15,574 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,1-5ed4808ef0e6,42819,1732148294393 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@a6fed76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:15,574 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,1-5ed4808ef0e6,42819,1732148294393 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:15,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:15,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@730965dc 2024-11-21T00:18:15,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:15,578 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33750, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=MasterService 2024-11-21T00:18:15,579 INFO [RS:0;5ed4808ef0e6:42819.replicationSource,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,42819,1732148294393 (queues=0) is replicating from cluster=e99aa0aa-6523-4535-af8b-0f39a4d084f3 to cluster=706478ae-f456-4e84-b28f-48af54a5495c 2024-11-21T00:18:15,584 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,2-5ed4808ef0e6,42819,1732148294393 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36249' 2024-11-21T00:18:15,586 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,2-5ed4808ef0e6,42819,1732148294393 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1b087045, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:15,586 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,2-5ed4808ef0e6,42819,1732148294393 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36249,-1 for getting cluster id 2024-11-21T00:18:15,586 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,2-5ed4808ef0e6,42819,1732148294393 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:15,587 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
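The ReplicationSource lines above correspond to peers '1' and '2', which were registered earlier by the completed AddPeerProcedure entries. A minimal sketch of registering such a peer through the client API; the peer id and cluster key below are placeholders, not the values used by this test, which wires its peer to the other mini-cluster through an RPC connection-registry URI:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Placeholder cluster key in the classic zk-quorum:port:znode-parent form.
            ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
                .setClusterKey("peer-zk-host:2181:/hbase")
                .build();
            admin.addReplicationPeer("1", peer);
        }
    }
}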
2024-11-21T00:18:15,587 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,42819,1732148294393, RpcServer on 5ed4808ef0e6/172.17.0.2:42819, sessionid=0x1015ac108c20007 2024-11-21T00:18:15,587 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:18:15,587 DEBUG [RS:0;5ed4808ef0e6:42819 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:15,587 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,42819,1732148294393' 2024-11-21T00:18:15,587 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0857133414/flush-table-proc/abort' 2024-11-21T00:18:15,587 DEBUG [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '706478ae-f456-4e84-b28f-48af54a5495c' 2024-11-21T00:18:15,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:15,588 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0857133414/flush-table-proc/acquired' 2024-11-21T00:18:15,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "706478ae-f456-4e84-b28f-48af54a5495c" 2024-11-21T00:18:15,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@409c0d19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:15,588 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:18:15,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36249,-1] 2024-11-21T00:18:15,588 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:18:15,588 DEBUG [RS:0;5ed4808ef0e6:42819 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:15,588 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,42819,1732148294393' 2024-11-21T00:18:15,588 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0857133414/online-snapshot/abort' 2024-11-21T00:18:15,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:15,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:15,589 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0857133414/online-snapshot/acquired' 2024-11-21T00:18:15,590 INFO [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33756, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:15,590 DEBUG [RS:0;5ed4808ef0e6:42819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:18:15,590 INFO [RS:0;5ed4808ef0e6:42819 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:18:15,590 INFO [RS:0;5ed4808ef0e6:42819 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:18:15,590 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,2-5ed4808ef0e6,42819,1732148294393 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@645c3726, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:15,591 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource,2-5ed4808ef0e6,42819,1732148294393 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:15,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:15,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a9a449 2024-11-21T00:18:15,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:15,594 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33766, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=MasterService 2024-11-21T00:18:15,595 INFO [RS:0;5ed4808ef0e6:42819.replicationSource,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.ReplicationSource(613): peerId=2, queueId=2-5ed4808ef0e6,42819,1732148294393 (queues=0) is replicating from cluster=e99aa0aa-6523-4535-af8b-0f39a4d084f3 to cluster=706478ae-f456-4e84-b28f-48af54a5495c 2024-11-21T00:18:15,691 INFO [RS:0;5ed4808ef0e6:42819 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:15,693 INFO [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C42819%2C1732148294393, suffix=, logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs, maxLogs=10 2024-11-21T00:18:15,706 DEBUG [RS:0;5ed4808ef0e6:42819 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693, exclude list is [], retry=0 2024-11-21T00:18:15,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:18:15,711 INFO [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 2024-11-21T00:18:15,711 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSourceManager(789): Start tracking logs for wal group 5ed4808ef0e6%2C42819%2C1732148294393 for peer 2 2024-11-21T00:18:15,712 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSourceManager(789): Start tracking logs for wal group 5ed4808ef0e6%2C42819%2C1732148294393 for peer 1 2024-11-21T00:18:15,712 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C42819%2C1732148294393 2024-11-21T00:18:15,712 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,42819,1732148294393, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:18:15,712 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(382): peerId=2, starting shipping worker for walGroupId=5ed4808ef0e6%2C42819%2C1732148294393 2024-11-21T00:18:15,712 INFO [RS:0;5ed4808ef0e6:42819.replicationSource.shipper5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C42819%2C1732148294393 2024-11-21T00:18:15,712 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=2-5ed4808ef0e6,42819,1732148294393, ReplicationSourceWALReaderThread : 2 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:18:15,713 DEBUG [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:18:15,713 INFO [RS:0;5ed4808ef0e6:42819.replicationSource.shipper5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C42819%2C1732148294393 2024-11-21T00:18:15,713 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693, startPosition=0, beingWritten=true 2024-11-21T00:18:15,713 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693, startPosition=0, beingWritten=true 2024-11-21T00:18:15,733 DEBUG [5ed4808ef0e6:34235 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:18:15,733 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:15,735 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in 
ZooKeeper as 5ed4808ef0e6,42819,1732148294393, state=OPENING 2024-11-21T00:18:15,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0857133414/meta-region-server 2024-11-21T00:18:15,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0857133414/meta-region-server 2024-11-21T00:18:15,904 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=16, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:15,904 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:15,904 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:15,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,42819,1732148294393}] 2024-11-21T00:18:15,927 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:15,928 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:16,058 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:18:16,059 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57517, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:18:16,063 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:18:16,063 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:16,064 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:18:16,065 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C42819%2C1732148294393.meta, suffix=.meta, 
logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs, maxLogs=10 2024-11-21T00:18:16,079 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.meta.1732148296066.meta, exclude list is [], retry=0 2024-11-21T00:18:16,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:18:16,084 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.meta.1732148296066.meta 2024-11-21T00:18:16,088 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:18:16,088 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:16,088 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:16,088 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:16,088 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:18:16,089 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
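The meta region open above loads MultiRowMutationEndpoint because the coprocessor is declared in the table descriptor (HTD) of hbase:meta. A minimal sketch of declaring the same endpoint on an ordinary table descriptor; the table and family names are placeholders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorOnTable {
    static TableDescriptor build() throws java.io.IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            // Same endpoint class that the meta descriptor carries in the log above.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
    }
}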
2024-11-21T00:18:16,089 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:18:16,089 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:16,089 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:18:16,089 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:18:16,091 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:16,093 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:16,093 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:16,104 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/info/a165e616f35d4d459698751ae28b0e0d 2024-11-21T00:18:16,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:16,105 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:16,106 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:16,106 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:16,118 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/ns/8584763792984ca5b1fc3b4e98482bed 2024-11-21T00:18:16,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:16,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:16,120 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:16,120 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:16,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:16,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:16,122 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
2024-11-21T00:18:16,122 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:16,131 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/table/870975dcc3ea4a9a82bc67ddb44e064e 2024-11-21T00:18:16,131 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:16,131 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:16,132 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740 2024-11-21T00:18:16,133 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740 2024-11-21T00:18:16,135 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:16,135 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:16,136 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
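The 32.0 M figure in the FlushLargeStoresPolicy entry above is simply the region memstore flush size divided by the number of column families in hbase:meta (info, ns, rep_barrier, table), which also matches the flushSizeLowerBound=33554432 reported just below. A minimal sketch of that arithmetic, assuming the default 128 MB flush size:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // FlushLargeStoresPolicy falls back to memstoreFlushSize / #families when
    // hbase.hregion.percolumnfamilyflush.size.lower.bound is not set.
    long memstoreFlushSize = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size default (assumed)
    int families = 4;                            // info, ns, rep_barrier, table
    long lowerBound = memstoreFlushSize / families;
    System.out.println(lowerBound);              // 33554432 bytes = 32.0 MB
  }
}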
2024-11-21T00:18:16,138 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:16,139 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=15; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64538487, jitterRate=-0.03830160200595856}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:16,139 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:18:16,139 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148296089Writing region info on filesystem at 1732148296089Initializing all the Stores at 1732148296090 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148296091 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148296091Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148296091Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148296091Cleaning up temporary data from old regions at 1732148296135 (+44 ms)Running coprocessor post-open hooks at 1732148296139 (+4 ms)Region opened successfully at 1732148296139 2024-11-21T00:18:16,141 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=17, masterSystemTime=1732148296057 2024-11-21T00:18:16,144 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:18:16,144 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:18:16,145 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=15, regionLocation=5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:16,147 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,42819,1732148294393, state=OPEN 2024-11-21T00:18:16,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0857133414/meta-region-server 2024-11-21T00:18:16,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0857133414/meta-region-server 2024-11-21T00:18:16,199 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:16,199 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0857133414/meta-region-server: CHANGED 2024-11-21T00:18:16,199 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=17, ppid=16, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:16,204 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-11-21T00:18:16,204 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,42819,1732148294393 in 295 msec 2024-11-21T00:18:16,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-11-21T00:18:16,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 880 msec 2024-11-21T00:18:16,238 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:16,238 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:16,644 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:16,644 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): 
Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:16,969 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=1657ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=1503ms 2024-11-21T00:18:17,152 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:17,152 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:17,760 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:17,760 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:17,936 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:18,472 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=3159ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=3005ms 2024-11-21T00:18:18,475 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:18,488 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:19,285 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:19,296 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:19,827 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=4515ms, expected min=1 server(s), max=NO_LIMIT server(s), master is running 2024-11-21T00:18:19,827 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:18:19,828 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:19,828 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42819,1732148294393, seqNum=-1] 2024-11-21T00:18:19,828 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:19,829 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38159, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:19,836 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=16c2eddea6b943d19f79621ce6daf354, regionState=OPEN, lastHost=5ed4808ef0e6,44897,1732148276946, regionLocation=5ed4808ef0e6,44897,1732148276946, openSeqNum=2 2024-11-21T00:18:19,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:18:19,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148359836 2024-11-21T00:18:19,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148419836 2024-11-21T00:18:19,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 9 msec 2024-11-21T00:18:19,837 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34235,1732148294317-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:19,837 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34235,1732148294317-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
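The "fetched meta region location" entries above record the client-side registry lookup for hbase:meta. A hedged sketch of the equivalent lookup through the public client API follows; the connection setup and class name are assumptions for illustration, while TableName.META_TABLE_NAME and RegionLocator are standard HBase client types.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves hbase:meta,,1.1588230740 to its current region server,
      // much like the ConnectionUtils(555) entries above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}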
2024-11-21T00:18:19,837 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34235,1732148294317-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:19,837 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(207): 5ed4808ef0e6,44897,1732148276946 had 2 regions 2024-11-21T00:18:19,837 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:34235, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:19,837 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:19,837 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:19,839 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=15, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 5ed4808ef0e6,44897,1732148276946, splitWal=true, meta=true, isMeta: false 2024-11-21T00:18:19,841 INFO [PEWorker-3 {}] master.SplitLogManager(171): hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946-splitting dir is empty, no logs to split. 2024-11-21T00:18:19,841 INFO [PEWorker-3 {}] master.SplitWALManager(105): 5ed4808ef0e6,44897,1732148276946 WAL count=0, meta=false 2024-11-21T00:18:19,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 5.293sec 2024-11-21T00:18:19,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:18:19,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:18:19,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:18:19,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T00:18:19,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:18:19,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34235,1732148294317-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:19,846 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34235,1732148294317-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:18:19,847 INFO [PEWorker-3 {}] master.SplitLogManager(171): hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,44897,1732148276946-splitting dir is empty, no logs to split. 
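The master lines above note that quota support and slowlog system-table logging are disabled in this run. A hedged sketch of the configuration switches involved follows; the key names are believed to be the standard HBase keys, and the values and class name are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FeatureTogglesSketch {
  // Illustrative only: the toggles behind "Quota support disabled" and
  // "Slow/Large requests logging to system table hbase:slowlog is disabled".
  static Configuration withQuotasAndSlowLog() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true);                         // enables MasterQuotaManager
    conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true); // persist slow RPCs to hbase:slowlog
    return conf;
  }
}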
2024-11-21T00:18:19,848 INFO [PEWorker-3 {}] master.SplitWALManager(105): 5ed4808ef0e6,44897,1732148276946 WAL count=0, meta=false 2024-11-21T00:18:19,848 DEBUG [PEWorker-3 {}] procedure.ServerCrashProcedure(329): Check if 5ed4808ef0e6,44897,1732148276946 WAL splitting is done? wals=0, meta=false 2024-11-21T00:18:19,850 WARN [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(177): unknown_server=5ed4808ef0e6,44897,1732148276946/hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:19,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN}] 2024-11-21T00:18:19,852 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:18:19,852 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:18:19,852 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34235,1732148294317-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:19,854 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=18, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN 2024-11-21T00:18:19,856 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=18, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-21T00:18:19,898 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c33c994, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:19,898 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34235,-1 for getting cluster id 2024-11-21T00:18:19,899 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:19,901 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e99aa0aa-6523-4535-af8b-0f39a4d084f3' 2024-11-21T00:18:19,904 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:19,905 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e99aa0aa-6523-4535-af8b-0f39a4d084f3" 2024-11-21T00:18:19,905 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@569c94dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:19,905 DEBUG 
[RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34235,-1] 2024-11-21T00:18:19,905 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:19,906 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,907 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51894, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:19,908 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@651a2678, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:19,909 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:19,910 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42819,1732148294393, seqNum=-1] 2024-11-21T00:18:19,910 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:19,913 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38698, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:19,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(995): HBase has been restarted 2024-11-21T00:18:19,916 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:18:19,916 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.HBaseTestingUtil.restartHBaseCluster(HBaseTestingUtil.java:998) at org.apache.hadoop.hbase.HBaseTestingUtil.restartHBaseCluster(HBaseTestingUtil.java:978) at org.apache.hadoop.hbase.HBaseTestingUtil.restartHBaseCluster(HBaseTestingUtil.java:971) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:496) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:19,916 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,916 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,916 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:19,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2549): Invalidated connection. 
Updating master addresses before: 5ed4808ef0e6:34235 after: 5ed4808ef0e6:34235 2024-11-21T00:18:19,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e78affd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:19,917 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34235,-1 for getting cluster id 2024-11-21T00:18:19,918 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:19,918 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e99aa0aa-6523-4535-af8b-0f39a4d084f3' 2024-11-21T00:18:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e99aa0aa-6523-4535-af8b-0f39a4d084f3" 2024-11-21T00:18:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a5b3e13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34235,-1] 2024-11-21T00:18:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:19,919 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,920 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51926, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:19,921 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b049635, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:19,922 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:19,922 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:19,922 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2942c8e2 2024-11-21T00:18:19,923 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:19,924 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51938, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:19,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34235 {}] master.HMaster(4002): 
Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:18:19,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34235 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=2 2024-11-21T00:18:19,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34235 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:18:19,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34235 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=2 2024-11-21T00:18:19,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:18:19,927 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:18:19,927 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:510) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:19,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,928 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:19,928 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:18:19,928 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1497339394, stopped=false 2024-11-21T00:18:19,928 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,36249,1732148284219 2024-11-21T00:18:19,940 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1336725220/running 2024-11-21T00:18:19,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1336725220/running 2024-11-21T00:18:19,940 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:19,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:19,940 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:19,940 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
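The call stacks above come from TestMasterReplication#testBasePeerConfigsForReplicationPeer restarting and then tearing down the mini cluster, and the HMaster(4002) lines record the client re-reading the replication peer configs after the restart. A simplified, hedged sketch of that flow follows; the method names are taken from the stack traces and the public Admin API, while the single-argument restartHBaseCluster overload, getConnection() usage, and the peer ids "1" and "2" are assumptions for illustration.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class RestartAndCheckPeersSketch {
  // Illustrative only: restart the mini cluster, re-read the peer configs,
  // then shut the cluster down, mirroring the sequence logged above.
  static void restartAndCheck(HBaseTestingUtil util) throws Exception {
    util.restartHBaseCluster(1);                         // "HBase has been restarted"
    try (Admin admin = util.getConnection().getAdmin()) {
      for (String peerId : new String[] { "1", "2" }) {  // "get replication peer config, id=1 / id=2"
        ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(peerId);
        System.out.println(peerId + " -> " + peerConfig.getConfiguration());
      }
    }
    util.shutdownMiniCluster();                          // "Shutting down minicluster"
  }
}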
2024-11-21T00:18:19,940 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:510) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:19,940 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,941 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on znode that does not yet exist, /1-1336725220/running 2024-11-21T00:18:19,941 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,41251,1732148284373' ***** 2024-11-21T00:18:19,941 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Set watcher on znode that does not yet exist, /1-1336725220/running 2024-11-21T00:18:19,941 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:18:19,941 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:18:19,941 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:18:19,941 INFO [RS:0;5ed4808ef0e6:41251 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:18:19,941 INFO [RS:0;5ed4808ef0e6:41251 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:18:19,942 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:19,942 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:19,942 INFO [RS:0;5ed4808ef0e6:41251 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:41251. 2024-11-21T00:18:19,942 DEBUG [RS:0;5ed4808ef0e6:41251 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:19,942 DEBUG [RS:0;5ed4808ef0e6:41251 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:19,942 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:18:19,942 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-21T00:18:19,942 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:18:19,942 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:18:19,943 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:19,945 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:18:19,945 DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:18:19,945 DEBUG [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:19,945 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:19,945 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:19,945 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:19,945 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:19,945 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:19,946 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-21T00:18:19,977 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740/.tmp/ns/3b9b56b6eb904ce88534bd64f177c838 is 43, key is default/ns:d/1732148287380/Put/seqid=0 2024-11-21T00:18:19,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741835_1011 (size=5153) 2024-11-21T00:18:19,989 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740/.tmp/ns/3b9b56b6eb904ce88534bd64f177c838 2024-11-21T00:18:20,003 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740/.tmp/ns/3b9b56b6eb904ce88534bd64f177c838 as hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740/ns/3b9b56b6eb904ce88534bd64f177c838 2024-11-21T00:18:20,006 DEBUG [5ed4808ef0e6:34235 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:18:20,007 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta 
row=16c2eddea6b943d19f79621ce6daf354, regionState=OPENING, regionLocation=5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:20,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=18, ppid=15, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN because future has completed 2024-11-21T00:18:20,014 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740/ns/3b9b56b6eb904ce88534bd64f177c838, entries=2, sequenceid=6, filesize=5.0 K 2024-11-21T00:18:20,015 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 70ms, sequenceid=6, compaction requested=false 2024-11-21T00:18:20,015 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:18:20,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16c2eddea6b943d19f79621ce6daf354, server=5ed4808ef0e6,42819,1732148294393}] 2024-11-21T00:18:20,023 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T00:18:20,024 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:20,024 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:20,024 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:20,024 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148299945Running coprocessor pre-close hooks at 1732148299945Disabling compacts and flushes for region at 1732148299945Disabling writes for close at 1732148299945Obtaining lock to block concurrent updates at 1732148299946 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732148299946Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732148299946Flushing stores of hbase:meta,,1.1588230740 at 1732148299947 (+1 ms)Flushing 1588230740/ns: creating writer at 1732148299948 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732148299976 (+28 ms)Flushing 1588230740/ns: closing flushed file at 1732148299976Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e5b277b: reopening flushed file at 1732148300002 (+26 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 
B/464, currentSize=0 B/0 for 1588230740 in 70ms, sequenceid=6, compaction requested=false at 1732148300015 (+13 ms)Writing region close event to WAL at 1732148300017 (+2 ms)Running coprocessor post-close hooks at 1732148300024 (+7 ms)Closed at 1732148300024 2024-11-21T00:18:20,024 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:20,145 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,41251,1732148284373; all regions closed. 2024-11-21T00:18:20,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741834_1010 (size=1152) 2024-11-21T00:18:20,149 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/WALs/5ed4808ef0e6,41251,1732148284373/5ed4808ef0e6%2C41251%2C1732148284373.meta.1732148287288.meta not finished, retry = 0 2024-11-21T00:18:20,175 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:20,175 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:20,176 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:18:20,178 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C42819%2C1732148294393.rep, suffix=, logDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393, archiveDir=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs, maxLogs=10 2024-11-21T00:18:20,192 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:20,194 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.rep.1732148300178, exclude list is [], retry=0 2024-11-21T00:18:20,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44135,DS-4b3334ca-02cd-4d05-8ac1-6283f2685b09,DISK] 2024-11-21T00:18:20,201 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.rep.1732148300178 2024-11-21T00:18:20,202 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39931:39931)] 2024-11-21T00:18:20,202 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(7752): Opening region: {ENCODED => 16c2eddea6b943d19f79621ce6daf354, NAME => 'hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:20,203 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:20,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:20,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. service=MultiRowMutationService 2024-11-21T00:18:20,203 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 
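
The "Region close journal" a couple of records above packs the whole flush-and-close timeline of hbase:meta into one string, each phase stamped with an epoch-millisecond timestamp and a running "(+N ms)" delta. Below is a minimal, illustrative sketch of splitting such a journal string back into per-phase gaps; the regular expression and the abridged sample entries are taken from the journal text above, everything else (class name, formatting) is just for illustration.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Illustrative only: splits an HBase "Region close journal" string into
// (phase description, epoch-millis) pairs and prints the gap between the
// consecutive entries of this abridged sample.
public class CloseJournalTimings {
    // Each journal entry ends with "at <13-digit epoch millis>", optionally
    // followed by the "(+N ms)" delta that HBase already computed itself.
    private static final Pattern PHASE =
        Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?");

    public static void main(String[] args) {
        String journal =
            "Waiting for close lock at 1732148299945"
          + "Running coprocessor pre-close hooks at 1732148299945"
          + "Disabling writes for close at 1732148299945"
          + "Flushing 1588230740/ns: appending metadata at 1732148299976 (+28 ms)"
          + "Closed at 1732148300024";

        long previous = -1;
        Matcher m = PHASE.matcher(journal);
        while (m.find()) {
            long ts = Long.parseLong(m.group(2));
            long delta = previous < 0 ? 0 : ts - previous;
            System.out.printf("%-50s +%d ms%n", m.group(1).trim(), delta);
            previous = ts;
        }
    }
}
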
2024-11-21T00:18:20,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,203 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:20,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:20,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(7794): checking encryption for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(7797): checking classloading for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,206 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,209 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16c2eddea6b943d19f79621ce6daf354 columnFamilyName hfileref 2024-11-21T00:18:20,209 DEBUG [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:20,211 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(327): Store=16c2eddea6b943d19f79621ce6daf354/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:20,211 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 
16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,212 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16c2eddea6b943d19f79621ce6daf354 columnFamilyName queue 2024-11-21T00:18:20,213 DEBUG [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:20,223 DEBUG [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/queue/df2dd4c028384048ac6ac8a934b1bb66 2024-11-21T00:18:20,223 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(327): Store=16c2eddea6b943d19f79621ce6daf354/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:20,223 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,225 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16c2eddea6b943d19f79621ce6daf354 columnFamilyName sid 2024-11-21T00:18:20,225 DEBUG [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:20,225 INFO [StoreOpener-16c2eddea6b943d19f79621ce6daf354-1 {}] regionserver.HStore(327): Store=16c2eddea6b943d19f79621ce6daf354/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:20,225 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(1038): replaying wal for 
16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,227 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,228 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,230 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:18:20,230 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:18:20,230 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(1048): stopping wal replay for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,230 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(1060): Cleaning up temporary data for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,231 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:18:20,233 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(1093): writing seq id for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,235 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(1114): Opened 16c2eddea6b943d19f79621ce6daf354; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75088682, jitterRate=0.11890855431556702}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:18:20,235 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:20,236 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegion(1006): Region open journal for 16c2eddea6b943d19f79621ce6daf354: Running coprocessor pre-open hook at 1732148300204Writing region info on filesystem at 1732148300204Initializing all the Stores at 1732148300205 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148300206 (+1 ms)Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148300206Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148300206Cleaning up temporary data from old regions at 1732148300230 (+24 ms)Running coprocessor post-open hooks at 1732148300235 (+5 ms)Region opened successfully at 1732148300236 (+1 ms) 2024-11-21T00:18:20,237 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354., pid=19, masterSystemTime=1732148300169 2024-11-21T00:18:20,240 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:20,240 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=19}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:20,241 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=18 updating hbase:meta row=16c2eddea6b943d19f79621ce6daf354, regionState=OPEN, openSeqNum=10, regionLocation=5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:20,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16c2eddea6b943d19f79621ce6daf354, server=5ed4808ef0e6,42819,1732148294393 because future has completed 2024-11-21T00:18:20,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-21T00:18:20,249 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; OpenRegionProcedure 16c2eddea6b943d19f79621ce6daf354, server=5ed4808ef0e6,42819,1732148294393 in 230 msec 2024-11-21T00:18:20,253 DEBUG [RS:0;5ed4808ef0e6:41251 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/oldWALs 2024-11-21T00:18:20,253 INFO [RS:0;5ed4808ef0e6:41251 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C41251%2C1732148284373.meta:.meta(num 1732148287288) 2024-11-21T00:18:20,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=18, resume processing ppid=15 2024-11-21T00:18:20,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=15, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=16c2eddea6b943d19f79621ce6daf354, ASSIGN in 397 msec 2024-11-21T00:18:20,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=15, state=RUNNABLE:ASSIGN_REPLICATION_QUEUES_ADD_MISSING_QUEUES, hasLock=false; org.apache.hadoop.hbase.master.replication.AssignReplicationQueuesProcedure}] 2024-11-21T00:18:20,255 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741832_1008 (size=93) 2024-11-21T00:18:20,258 DEBUG [RS:0;5ed4808ef0e6:41251 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/oldWALs 2024-11-21T00:18:20,258 INFO [RS:0;5ed4808ef0e6:41251 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C41251%2C1732148284373:(num 1732148286382) 2024-11-21T00:18:20,258 DEBUG [RS:0;5ed4808ef0e6:41251 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:20,258 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:20,259 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:20,259 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:20,259 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:20,259 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:18:20,259 INFO [RS:0;5ed4808ef0e6:41251 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41251 2024-11-21T00:18:20,266 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1336725220/rs/5ed4808ef0e6,41251,1732148284373 2024-11-21T00:18:20,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220/rs 2024-11-21T00:18:20,266 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:20,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='', locateType=CURRENT is [region=hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354., hostname=5ed4808ef0e6,42819,1732148294393, seqNum=10] 2024-11-21T00:18:20,277 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,41251,1732148284373] 2024-11-21T00:18:20,287 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-1336725220/draining/5ed4808ef0e6,41251,1732148284373 already deleted, retry=false 2024-11-21T00:18:20,287 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,41251,1732148284373 expired; onlineServers=0 2024-11-21T00:18:20,287 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,36249,1732148284219' ***** 2024-11-21T00:18:20,287 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:18:20,287 INFO [M:0;5ed4808ef0e6:36249 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:20,288 INFO [M:0;5ed4808ef0e6:36249 {}] hbase.HBaseServerBase(438): Shutdown 
chores and chore service 2024-11-21T00:18:20,288 DEBUG [M:0;5ed4808ef0e6:36249 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:18:20,288 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-21T00:18:20,288 DEBUG [M:0;5ed4808ef0e6:36249 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:18:20,288 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148286076 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148286076,5,FailOnTimeoutGroup] 2024-11-21T00:18:20,288 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148286077 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148286077,5,FailOnTimeoutGroup] 2024-11-21T00:18:20,288 INFO [M:0;5ed4808ef0e6:36249 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:20,288 INFO [M:0;5ed4808ef0e6:36249 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:20,288 DEBUG [M:0;5ed4808ef0e6:36249 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:18:20,288 INFO [M:0;5ed4808ef0e6:36249 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:18:20,289 INFO [M:0;5ed4808ef0e6:36249 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:20,289 INFO [M:0;5ed4808ef0e6:36249 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:18:20,289 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-21T00:18:20,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1336725220/master 2024-11-21T00:18:20,298 DEBUG [M:0;5ed4808ef0e6:36249 {}] zookeeper.ZKUtil(347): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Unable to get data of znode /1-1336725220/master because node does not exist (not an error) 2024-11-21T00:18:20,298 WARN [M:0;5ed4808ef0e6:36249 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:18:20,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1336725220 2024-11-21T00:18:20,300 INFO [M:0;5ed4808ef0e6:36249 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/.lastflushedseqids 2024-11-21T00:18:20,300 DEBUG [PEWorker-2 {}] replication.AssignReplicationQueuesProcedure(120): There are 2 replication queues need to be claimed for 5ed4808ef0e6,44897,1732148276946 2024-11-21T00:18:20,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.ClaimReplicationQueueRemoteProcedure}] 2024-11-21T00:18:20,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741836_1012 (size=99) 2024-11-21T00:18:20,377 INFO [RS:0;5ed4808ef0e6:41251 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:20,377 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:20,377 INFO [RS:0;5ed4808ef0e6:41251 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,41251,1732148284373; zookeeper connection closed. 
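
The ProcedureExecutor records above chain pid=19 under ppid=18 under ppid=15, and then spawn pid=20 (ppid=15) and pid=21 (ppid=20): each subprocedure names its parent, and the parent resumes once the child finishes. A small sketch of rebuilding that parent/child nesting from the pid=/ppid= tokens in such lines follows; the sample lines are trimmed from this log, the parsing itself is purely illustrative.

import java.util.*;
import java.util.regex.*;

// Illustrative only: groups procedure log fragments by their ppid so the
// nesting visible above (19 under 18 under 15, 21 under 20) becomes explicit.
public class ProcedureTree {
    private static final Pattern IDS = Pattern.compile("pid=(\\d+), ppid=(\\d+)");

    public static void main(String[] args) {
        List<String> lines = List.of(
            "Finished pid=19, ppid=18, state=SUCCESS; OpenRegionProcedure 16c2eddea6b943d19f79621ce6daf354",
            "Finished pid=18, ppid=15, state=SUCCESS; TransitRegionStateProcedure table=hbase:replication, ASSIGN",
            "Initialized subprocedures=[{pid=20, ppid=15, state=RUNNABLE; AssignReplicationQueuesProcedure}]",
            "Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; ClaimReplicationQueueRemoteProcedure}]");

        Map<Integer, List<Integer>> children = new TreeMap<>();
        for (String line : lines) {
            Matcher m = IDS.matcher(line);
            while (m.find()) {
                int pid = Integer.parseInt(m.group(1));
                int ppid = Integer.parseInt(m.group(2));
                children.computeIfAbsent(ppid, k -> new ArrayList<>()).add(pid);
            }
        }
        children.forEach((parent, kids) ->
            System.out.println("ppid=" + parent + " -> child pids " + kids));
    }
}
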
2024-11-21T00:18:20,377 DEBUG [pool-149-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41251-0x1015ac108c20004, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:20,377 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@20b8d9af {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@20b8d9af 2024-11-21T00:18:20,378 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:18:20,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42819 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.ClaimReplicationQueueCallable, pid=21 2024-11-21T00:18:20,707 INFO [M:0;5ed4808ef0e6:36249 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:18:20,707 INFO [M:0;5ed4808ef0e6:36249 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:18:20,708 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:20,708 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:20,708 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:20,708 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:20,708 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:18:20,708 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.62 KB heapSize=11.22 KB 2024-11-21T00:18:20,726 DEBUG [M:0;5ed4808ef0e6:36249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad75487d47c64b549b063c592dfed047 is 82, key is hbase:meta,,1/info:regioninfo/1732148287349/Put/seqid=0 2024-11-21T00:18:20,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741837_1013 (size=5672) 2024-11-21T00:18:21,131 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad75487d47c64b549b063c592dfed047 2024-11-21T00:18:21,155 DEBUG [M:0;5ed4808ef0e6:36249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/21ee2ef2821747ec89411e6f534a3d2a is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732148287387/Put/seqid=0 2024-11-21T00:18:21,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741838_1014 (size=5275) 2024-11-21T00:18:21,204 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:21,210 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:21,562 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/21ee2ef2821747ec89411e6f534a3d2a 2024-11-21T00:18:21,592 DEBUG [M:0;5ed4808ef0e6:36249 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ad99318cd554470481edef945a8f5568 is 69, key is 5ed4808ef0e6,41251,1732148284373/rs:state/1732148286149/Put/seqid=0 2024-11-21T00:18:21,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741839_1015 (size=5156) 2024-11-21T00:18:21,998 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=28 (bloomFilter=true), 
to=hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ad99318cd554470481edef945a8f5568 2024-11-21T00:18:22,009 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ad75487d47c64b549b063c592dfed047 as hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ad75487d47c64b549b063c592dfed047 2024-11-21T00:18:22,017 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ad75487d47c64b549b063c592dfed047, entries=8, sequenceid=28, filesize=5.5 K 2024-11-21T00:18:22,019 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/21ee2ef2821747ec89411e6f534a3d2a as hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/21ee2ef2821747ec89411e6f534a3d2a 2024-11-21T00:18:22,026 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/21ee2ef2821747ec89411e6f534a3d2a, entries=3, sequenceid=28, filesize=5.2 K 2024-11-21T00:18:22,027 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ad99318cd554470481edef945a8f5568 as hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ad99318cd554470481edef945a8f5568 2024-11-21T00:18:22,035 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35053/user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ad99318cd554470481edef945a8f5568, entries=1, sequenceid=28, filesize=5.0 K 2024-11-21T00:18:22,036 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1328ms, sequenceid=28, compaction requested=false 2024-11-21T00:18:22,038 INFO [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
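
The "Len of the biggest cell" records above print cells in their row/family:qualifier/timestamp/type/seqid rendering, for example hbase:meta,,1/info:regioninfo/1732148287349/Put/seqid=0. The sketch below pulls those five fields back out of that printed form; it relies on the row containing no literal '/', which holds for the keys shown here, so treat it as an illustration of the textual layout rather than a general cell-key parser.

// Illustrative only: decomposes the cell key string printed by HFileWriterImpl
// ("row/family:qualifier/timestamp/type/seqid=N") into its parts. Assumes the
// row itself contains no '/', which is true for the keys in this log.
public class CellKeyFields {
    public static void main(String[] args) {
        String printed = "hbase:meta,,1/info:regioninfo/1732148287349/Put/seqid=0";

        String[] parts = printed.split("/");
        String row = parts[0];
        String[] colRef = parts[1].split(":", 2);   // family ':' qualifier
        long timestamp = Long.parseLong(parts[2]);
        String type = parts[3];
        long seqId = Long.parseLong(parts[4].substring("seqid=".length()));

        System.out.println("row       = " + row);
        System.out.println("family    = " + colRef[0]);
        System.out.println("qualifier = " + colRef[1]);
        System.out.println("timestamp = " + timestamp);
        System.out.println("type      = " + type);
        System.out.println("seqid     = " + seqId);
    }
}
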
2024-11-21T00:18:22,038 DEBUG [M:0;5ed4808ef0e6:36249 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148300707Disabling compacts and flushes for region at 1732148300707Disabling writes for close at 1732148300708 (+1 ms)Obtaining lock to block concurrent updates at 1732148300708Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148300708Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7802, getHeapSize=11424, getOffHeapSize=0, getCellsCount=35 at 1732148300708Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148300709 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148300709Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148300725 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148300725Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148301139 (+414 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148301155 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148301155Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148301575 (+420 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148301592 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148301592Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@946b889: reopening flushed file at 1732148302007 (+415 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@778668ec: reopening flushed file at 1732148302018 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1549a57b: reopening flushed file at 1732148302026 (+8 ms)Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1328ms, sequenceid=28, compaction requested=false at 1732148302037 (+11 ms)Writing region close event to WAL at 1732148302038 (+1 ms)Closed at 1732148302038 2024-11-21T00:18:22,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32973 is added to blk_1073741830_1006 (size=10165) 2024-11-21T00:18:22,042 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/e80ad71d-1ba9-f641-d355-40e41c6ddab8/MasterData/WALs/5ed4808ef0e6,36249,1732148284219/5ed4808ef0e6%2C36249%2C1732148284219.1732148285820 not finished, retry = 0 2024-11-21T00:18:22,143 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:18:22,143 INFO [M:0;5ed4808ef0e6:36249 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
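
The flush commits a few records above (and again near the end of this section) first land each new HFile under the store's .tmp directory and only then move it into the column-family directory ("Committing .../.tmp/info/... as .../info/..."), so readers never observe a half-written file. The sketch below mimics that write-then-move pattern with plain java.nio.file calls against local paths; it is an analogy for the staging idea only, not HBase's HRegionFileSystem code.

import java.io.IOException;
import java.nio.file.*;

// Illustrative only: write a file into a ".tmp" staging directory first,
// then move it into the live directory, mirroring the ".tmp/... as ..."
// commit lines in the log above.
public class TmpThenCommit {
    public static void main(String[] args) throws IOException {
        Path store = Files.createTempDirectory("store");
        Path tmpDir = Files.createDirectories(store.resolve(".tmp"));
        Path liveDir = Files.createDirectories(store.resolve("info"));

        // 1. Stage the new file where readers of "info" never look.
        Path staged = tmpDir.resolve("ad75487d47c64b549b063c592dfed047");
        Files.writeString(staged, "flushed cells would go here");

        // 2. Commit by moving it into place; ATOMIC_MOVE keeps the swap
        //    all-or-nothing on filesystems that support it.
        Path committed = liveDir.resolve(staged.getFileName());
        Files.move(staged, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("committed " + committed);
    }
}
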
2024-11-21T00:18:22,144 INFO [M:0;5ed4808ef0e6:36249 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36249 2024-11-21T00:18:22,147 INFO [M:0;5ed4808ef0e6:36249 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:22,313 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:22,317 DEBUG [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.1732148295693 to pos 0, reset compression=false 2024-11-21T00:18:22,324 INFO [M:0;5ed4808ef0e6:36249 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:22,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:22,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36249-0x1015ac108c20003, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:22,328 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c12bb3b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:22,330 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d1ec4c5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:22,331 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:22,331 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9b8f4a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:22,331 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22b39d31{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:22,334 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:18:22,334 WARN [BP-755534061-172.17.0.2-1732148281066 heartbeating to localhost/127.0.0.1:35053 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:18:22,334 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:18:22,334 WARN [BP-755534061-172.17.0.2-1732148281066 heartbeating to localhost/127.0.0.1:35053 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-755534061-172.17.0.2-1732148281066 (Datanode Uuid 4c360eb8-683a-449e-9eab-cc5a9758ef82) service to localhost/127.0.0.1:35053 2024-11-21T00:18:22,335 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/cluster_b1725b2c-39a7-23ea-8a4d-e181886c5c86/data/data1/current/BP-755534061-172.17.0.2-1732148281066 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:22,336 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/cluster_b1725b2c-39a7-23ea-8a4d-e181886c5c86/data/data2/current/BP-755534061-172.17.0.2-1732148281066 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:22,336 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:18:22,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3a033f80{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:22,345 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2993da97{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:22,345 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:22,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d5127ed{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:22,346 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b608463{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:22,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:18:22,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:18:22,374 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:18:22,374 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:510) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:22,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:22,374 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:22,374 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
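
The call stack above shows where this log comes from: TestMasterReplication.testBasePeerConfigsForReplicationPeer tearing its clusters down through HBaseTestingUtil.shutdownMiniCluster under JUnit 4. A skeletal sketch of that lifecycle follows; only the shutdownMiniCluster call and the JUnit 4 runner are confirmed by the trace, so the no-arg constructor, the startMiniCluster call, and the exception signatures are assumptions about the conventional HBaseTestingUtil API rather than anything taken from this log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Skeletal sketch of the start/shutdown lifecycle implied by the stack trace
// above. shutdownMiniCluster comes from the trace; startMiniCluster and the
// constructor are assumed, not confirmed by this log.
public class MiniClusterLifecycleSketch {

    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void startCluster() throws Exception {
        util.startMiniCluster();    // assumed API: boots HDFS, ZK, master, regionserver
    }

    @Test
    public void clusterIsUpLongEnoughToLogAllOfTheAbove() {
        // A real test (like testBasePeerConfigsForReplicationPeer) would
        // exercise replication peers here; this placeholder does nothing.
    }

    @After
    public void stopCluster() throws Exception {
        util.shutdownMiniCluster(); // produces the shutdown cascade seen in this log
    }
}
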
2024-11-21T00:18:22,375 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:18:22,375 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1871460218, stopped=false 2024-11-21T00:18:22,375 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,34235,1732148294317 2024-11-21T00:18:22,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/running 2024-11-21T00:18:22,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/running 2024-11-21T00:18:22,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:22,393 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:22,393 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:22,393 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:18:22,393 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsForReplicationPeer(TestMasterReplication.java:510) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:22,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:18:22,393 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Set watcher on znode that does not yet exist, /0857133414/running 2024-11-21T00:18:22,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:22,394 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,42819,1732148294393' ***** 2024-11-21T00:18:22,394 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:18:22,394 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:18:22,394 INFO [RS:0;5ed4808ef0e6:42819 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:18:22,394 INFO [RS:0;5ed4808ef0e6:42819 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:18:22,394 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:18:22,394 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(3091): Received CLOSE for 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:42819. 
2024-11-21T00:18:22,395 DEBUG [RS:0;5ed4808ef0e6:42819 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:22,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 16c2eddea6b943d19f79621ce6daf354, disabling compactions & flushes 2024-11-21T00:18:22,395 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:22,395 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:22,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:18:22,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. after waiting 0 ms 2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:18:22,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 
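
The ZKWatcher records above carry the shutdown signal itself: deleting the cluster's running znode (/0857133414/running) fires a NodeDeleted event at every watcher, each server re-arms its watch on the now-missing znode, and the stop sequence begins. The sketch below shows the same delete-triggers-watch pattern with the plain ZooKeeper client API; it is a generic illustration, not HBase's ZKWatcher, and the connect string and znode path are placeholders rather than values from this test.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Generic illustration of the pattern in the log above: watch a "running"
// znode and treat its deletion as the signal to begin shutting down.
public class RunningZNodeWatcher {
    public static void main(String[] args) throws Exception {
        String runningZNode = "/example-cluster/running";   // hypothetical path
        CountDownLatch shutdownRequested = new CountDownLatch(1);

        Watcher watcher = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && runningZNode.equals(event.getPath())) {
                shutdownRequested.countDown();               // same role as "Cluster shutdown set"
            }
        };

        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
        // exists() with watch=true fires the default watcher when the znode is
        // created or deleted, even if it does not exist yet.
        zk.exists(runningZNode, true);

        shutdownRequested.await();
        System.out.println("running znode deleted; stopping services");
        zk.close();
    }
}
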
2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:18:22,395 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:18:22,395 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 16c2eddea6b943d19f79621ce6daf354=hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354.} 2024-11-21T00:18:22,395 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 16c2eddea6b943d19f79621ce6daf354 2024-11-21T00:18:22,395 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:22,396 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:22,396 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:22,396 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:22,396 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:22,396 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=952 B heapSize=2.52 KB 2024-11-21T00:18:22,401 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/replication/16c2eddea6b943d19f79621ce6daf354/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-11-21T00:18:22,402 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:22,402 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:22,402 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 2024-11-21T00:18:22,402 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 16c2eddea6b943d19f79621ce6daf354: Waiting for close lock at 1732148302395Running coprocessor pre-close hooks at 1732148302395Disabling compacts and flushes for region at 1732148302395Disabling writes for close at 1732148302395Writing region close event to WAL at 1732148302396 (+1 ms)Running coprocessor post-close hooks at 1732148302401 (+5 ms)Closed at 1732148302402 (+1 ms) 2024-11-21T00:18:22,402 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354. 
2024-11-21T00:18:22,414 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/info/0747331f3ca24060b18d754177ee54da is 147, key is hbase:replication,,1732148287572.16c2eddea6b943d19f79621ce6daf354./info:regioninfo/1732148300241/Put/seqid=0 2024-11-21T00:18:22,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741851_1027 (size=6385) 2024-11-21T00:18:22,423 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:22,596 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:22,598 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:18:22,599 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:18:22,796 DEBUG [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:22,821 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=952 B at sequenceid=19 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/info/0747331f3ca24060b18d754177ee54da 2024-11-21T00:18:22,830 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/.tmp/info/0747331f3ca24060b18d754177ee54da as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/info/0747331f3ca24060b18d754177ee54da 2024-11-21T00:18:22,838 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/info/0747331f3ca24060b18d754177ee54da, entries=8, sequenceid=19, filesize=6.2 K 2024-11-21T00:18:22,839 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~952 B/952, heapSize ~1.76 KB/1800, currentSize=0 B/0 for 1588230740 in 443ms, sequenceid=19, compaction requested=false 2024-11-21T00:18:22,846 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/data/hbase/meta/1588230740/recovered.edits/22.seqid, newMaxSeqId=22, maxSeqId=14 2024-11-21T00:18:22,846 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:22,847 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:22,847 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:22,847 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148302395Running coprocessor pre-close hooks at 1732148302395Disabling compacts and flushes for region at 1732148302395Disabling writes for close at 1732148302396 (+1 ms)Obtaining lock to block concurrent updates at 1732148302396Preparing flush snapshotting stores in 1588230740 at 1732148302396Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=952, getHeapSize=2520, getOffHeapSize=0, getCellsCount=8 at 1732148302396Flushing stores of hbase:meta,,1.1588230740 at 1732148302397 (+1 ms)Flushing 1588230740/info: creating writer at 1732148302397Flushing 1588230740/info: appending metadata at 1732148302413 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732148302413Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79f17885: reopening flushed file at 1732148302829 (+416 ms)Finished flush of dataSize ~952 B/952, heapSize ~1.76 KB/1800, currentSize=0 B/0 for 1588230740 in 443ms, sequenceid=19, compaction requested=false at 1732148302839 (+10 ms)Writing region close event to WAL at 1732148302841 (+2 ms)Running coprocessor post-close hooks at 1732148302846 (+5 ms)Closed at 1732148302847 (+1 ms) 2024-11-21T00:18:22,847 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:22,996 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,42819,1732148294393; all regions closed. 
2024-11-21T00:18:22,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741849_1025 (size=2156) 2024-11-21T00:18:23,002 DEBUG [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs 2024-11-21T00:18:23,002 INFO [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C42819%2C1732148294393.meta:.meta(num 1732148296066) 2024-11-21T00:18:23,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741850_1026 (size=796) 2024-11-21T00:18:23,005 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/WALs/5ed4808ef0e6,42819,1732148294393/5ed4808ef0e6%2C42819%2C1732148294393.rep.1732148300178 not finished, retry = 0 2024-11-21T00:18:23,109 DEBUG [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs 2024-11-21T00:18:23,109 INFO [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C42819%2C1732148294393.rep:(num 1732148300178) 2024-11-21T00:18:23,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741848_1024 (size=93) 2024-11-21T00:18:23,118 DEBUG [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/oldWALs 2024-11-21T00:18:23,118 INFO [RS:0;5ed4808ef0e6:42819 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C42819%2C1732148294393:(num 1732148295693) 2024-11-21T00:18:23,118 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:23,118 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:23,118 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:23,119 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:23,119 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:23,119 WARN [RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0-0 {event_type=RS_CLAIM_REPLICATION_QUEUE, pid=21}] regionserver.ReplicationSourceManager(915): Interrupted while waiting before transferring a queue. 
2024-11-21T00:18:23,120 INFO [RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0-0 {event_type=RS_CLAIM_REPLICATION_QUEUE, pid=21}] regionserver.ReplicationSourceManager(920): Not transferring queue since we are shutting down 2024-11-21T00:18:23,120 DEBUG [RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0-0 {event_type=RS_CLAIM_REPLICATION_QUEUE, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-21T00:18:23,120 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,42819,1732148294393 because: Region server is closing 2024-11-21T00:18:23,120 INFO [RS:0;5ed4808ef0e6:42819 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:42819. 2024-11-21T00:18:23,121 DEBUG [RS:0;5ed4808ef0e6:42819 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:23,121 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:23,121 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher 
loop exited. 2024-11-21T00:18:23,121 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:23,124 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:18:23,222 WARN [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:18:23,222 WARN [RS:0;5ed4808ef0e6:42819.replicationSource.shipper5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 2024-11-21T00:18:23,222 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS:0;5ed4808ef0e6:42819.replicationSource.shipper5ed4808ef0e6%2C42819%2C1732148294393,1-5ed4808ef0e6,42819,1732148294393 terminated 2024-11-21T00:18:23,222 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(687): peerId=2, Closing source 2-5ed4808ef0e6,42819,1732148294393 because: Region server is closing 2024-11-21T00:18:23,222 INFO [RS:0;5ed4808ef0e6:42819 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:42819. 
2024-11-21T00:18:23,222 DEBUG [RS:0;5ed4808ef0e6:42819 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:23,222 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:23,223 DEBUG [RS:0;5ed4808ef0e6:42819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:23,223 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:23,323 WARN [RS:0;5ed4808ef0e6:42819.replicationSource.wal-reader.5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:18:23,323 WARN [RS:0;5ed4808ef0e6:42819.replicationSource.shipper5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 2024-11-21T00:18:23,323 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.ReplicationSource(739): peerId=2, ReplicationSourceWorker RS:0;5ed4808ef0e6:42819.replicationSource.shipper5ed4808ef0e6%2C42819%2C1732148294393,2-5ed4808ef0e6,42819,1732148294393 terminated 2024-11-21T00:18:23,323 INFO [RS:0;5ed4808ef0e6:42819 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42819 2024-11-21T00:18:23,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414/rs 2024-11-21T00:18:23,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/rs/5ed4808ef0e6,42819,1732148294393 2024-11-21T00:18:23,371 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:23,382 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,42819,1732148294393] 2024-11-21T00:18:23,392 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /0857133414/draining/5ed4808ef0e6,42819,1732148294393 already deleted, retry=false 2024-11-21T00:18:23,392 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,42819,1732148294393 expired; onlineServers=0 2024-11-21T00:18:23,392 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,34235,1732148294317' ***** 2024-11-21T00:18:23,392 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:18:23,392 INFO [M:0;5ed4808ef0e6:34235 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:23,392 INFO [M:0;5ed4808ef0e6:34235 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:23,393 DEBUG [M:0;5ed4808ef0e6:34235 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:18:23,393 DEBUG [M:0;5ed4808ef0e6:34235 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:18:23,393 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:18:23,393 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148295311 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148295311,5,FailOnTimeoutGroup] 2024-11-21T00:18:23,393 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148295311 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148295311,5,FailOnTimeoutGroup] 2024-11-21T00:18:23,393 INFO [M:0;5ed4808ef0e6:34235 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:23,393 INFO [M:0;5ed4808ef0e6:34235 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:23,393 DEBUG [M:0;5ed4808ef0e6:34235 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:18:23,393 INFO [M:0;5ed4808ef0e6:34235 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:18:23,393 INFO [M:0;5ed4808ef0e6:34235 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:23,394 INFO [M:0;5ed4808ef0e6:34235 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:18:23,394 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:18:23,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0857133414/master 2024-11-21T00:18:23,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0857133414 2024-11-21T00:18:23,403 DEBUG [M:0;5ed4808ef0e6:34235 {}] zookeeper.ZKUtil(347): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Unable to get data of znode /0857133414/master because node does not exist (not an error) 2024-11-21T00:18:23,403 WARN [M:0;5ed4808ef0e6:34235 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:18:23,404 INFO [M:0;5ed4808ef0e6:34235 {}] master.ServerManager(1134): Rewriting .lastflushedseqids file at: hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/.lastflushedseqids 2024-11-21T00:18:23,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741852_1028 (size=181) 2024-11-21T00:18:23,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:23,482 INFO [RS:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:23,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42819-0x1015ac108c20007, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:23,482 INFO [RS:0;5ed4808ef0e6:42819 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,42819,1732148294393; zookeeper 
connection closed. 2024-11-21T00:18:23,482 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69ed713 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69ed713 2024-11-21T00:18:23,482 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:18:23,811 INFO [M:0;5ed4808ef0e6:34235 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:18:23,811 INFO [M:0;5ed4808ef0e6:34235 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:18:23,811 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:23,811 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:23,811 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:23,812 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:23,812 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:23,812 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=18.30 KB heapSize=24.09 KB 2024-11-21T00:18:23,832 DEBUG [M:0;5ed4808ef0e6:34235 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8978d9ceae0407da8ff941950f6c0f9 is 82, key is hbase:meta,,1/info:regioninfo/1732148296145/Put/seqid=0 2024-11-21T00:18:23,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741853_1029 (size=5680) 2024-11-21T00:18:24,238 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8978d9ceae0407da8ff941950f6c0f9 2024-11-21T00:18:24,262 DEBUG [M:0;5ed4808ef0e6:34235 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd7676e4994f493cb5f2479d8ecbb830 is 375, key is \x00\x00\x00\x00\x00\x00\x00\x12/proc:d/1732148300251/Put/seqid=0 2024-11-21T00:18:24,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741854_1030 (size=7319) 2024-11-21T00:18:24,269 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.69 KB at sequenceid=172 (bloomFilter=true), 
to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd7676e4994f493cb5f2479d8ecbb830 2024-11-21T00:18:24,295 DEBUG [M:0;5ed4808ef0e6:34235 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd26fc35de5b41f3bbfc0dfb260efd11 is 69, key is 5ed4808ef0e6,42819,1732148294393/rs:state/1732148295442/Put/seqid=0 2024-11-21T00:18:24,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741855_1031 (size=5371) 2024-11-21T00:18:24,661 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:18:24,707 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=119 B at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd26fc35de5b41f3bbfc0dfb260efd11 2024-11-21T00:18:24,718 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fd26fc35de5b41f3bbfc0dfb260efd11 2024-11-21T00:18:24,719 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f8978d9ceae0407da8ff941950f6c0f9 as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8978d9ceae0407da8ff941950f6c0f9 2024-11-21T00:18:24,726 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f8978d9ceae0407da8ff941950f6c0f9, entries=8, sequenceid=172, filesize=5.5 K 2024-11-21T00:18:24,727 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/cd7676e4994f493cb5f2479d8ecbb830 as hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd7676e4994f493cb5f2479d8ecbb830 2024-11-21T00:18:24,757 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/cd7676e4994f493cb5f2479d8ecbb830, entries=7, sequenceid=172, filesize=7.1 K 2024-11-21T00:18:24,759 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fd26fc35de5b41f3bbfc0dfb260efd11 as 
hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fd26fc35de5b41f3bbfc0dfb260efd11 2024-11-21T00:18:24,766 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fd26fc35de5b41f3bbfc0dfb260efd11 2024-11-21T00:18:24,766 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41721/user/jenkins/test-data/3c943836-b770-af7a-d1f7-78ab7a50824b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fd26fc35de5b41f3bbfc0dfb260efd11, entries=2, sequenceid=172, filesize=5.2 K 2024-11-21T00:18:24,768 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.30 KB/18738, heapSize ~23.79 KB/24360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 956ms, sequenceid=172, compaction requested=false 2024-11-21T00:18:24,774 INFO [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:24,774 DEBUG [M:0;5ed4808ef0e6:34235 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148303811Disabling compacts and flushes for region at 1732148303811Disabling writes for close at 1732148303812 (+1 ms)Obtaining lock to block concurrent updates at 1732148303812Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148303812Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=18738, getHeapSize=24600, getOffHeapSize=0, getCellsCount=65 at 1732148303812Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148303813 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148303813Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148303832 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148303832Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148304245 (+413 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148304262 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148304262Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148304277 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148304294 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148304294Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@453f4cb5: reopening flushed file at 1732148304718 (+424 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@177903c: reopening flushed file at 1732148304726 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7473b169: reopening flushed file at 1732148304757 (+31 ms)Finished flush of dataSize ~18.30 KB/18738, heapSize ~23.79 KB/24360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 956ms, sequenceid=172, compaction requested=false at 1732148304768 (+11 ms)Writing region close event to WAL at 1732148304774 (+6 ms)Closed at 1732148304774 2024-11-21T00:18:24,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44135 is added to blk_1073741847_1023 (size=22949) 2024-11-21T00:18:24,783 INFO [M:0;5ed4808ef0e6:34235 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:18:24,783 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:18:24,783 INFO [M:0;5ed4808ef0e6:34235 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34235 2024-11-21T00:18:24,783 INFO [M:0;5ed4808ef0e6:34235 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:24,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:24,914 INFO [M:0;5ed4808ef0e6:34235 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:24,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34235-0x1015ac108c20006, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:24,920 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@318ae30c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:24,920 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@60268925{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:24,920 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:24,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1edffc5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:24,921 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a66a1cb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:24,922 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:18:24,922 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:18:24,922 WARN [BP-2095928671-172.17.0.2-1732148269835 heartbeating to localhost/127.0.0.1:41721 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:18:24,922 WARN [BP-2095928671-172.17.0.2-1732148269835 heartbeating to localhost/127.0.0.1:41721 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2095928671-172.17.0.2-1732148269835 (Datanode Uuid db4e7784-de1d-4f93-a596-ea21b9cdc983) service to localhost/127.0.0.1:41721 2024-11-21T00:18:24,923 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d/data/data1/current/BP-2095928671-172.17.0.2-1732148269835 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:24,923 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/cluster_a2c2b178-844e-5cbc-dbec-de007b979b1d/data/data2/current/BP-2095928671-172.17.0.2-1732148269835 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:24,923 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:18:24,929 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4219126d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:24,930 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@63a6952a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:24,930 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:24,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22c42bf7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:24,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65423e7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/35c110e9-35fd-6561-60fb-7165452ea314/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:24,939 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:18:24,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:18:24,964 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testBasePeerConfigsForReplicationPeer Thread=108 (was 11) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Time-limited test-EventThread java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41721 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: zk-event-processor-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41721 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41721 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-EventThread java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41721 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35053 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC 
Parameter Sending Thread for localhost/127.0.0.1:35053 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35053 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35053 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35053 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:50128) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5ed4808ef0e6:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:41721 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5ed4808ef0e6:0.procedureResultReporter app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35053 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5ed4808ef0e6:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native 
Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41721 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41721 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35053 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:50128) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-event-processor-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41721 from jenkins java.base@17.0.11/java.lang.Object.wait(Native 
Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:41721 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3bb7d7bf java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:35053 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35053 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=428 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=830 (was 1012), ProcessCount=11 (was 11), AvailableMemoryMB=1764 (was 2474) 2024-11-21T00:18:24,971 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer Thread=108, OpenFileDescriptor=428, MaxFileDescriptor=1048576, SystemLoadAverage=830, ProcessCount=11, AvailableMemoryMB=1763 2024-11-21T00:18:24,984 INFO [Time-limited test {}] replication.TestMasterReplication(553): testBasePeerConfigsForPeerMutations 2024-11-21T00:18:24,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.log.dir so I do NOT create it in target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e 2024-11-21T00:18:24,985 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:18:24,985 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.tmp.dir so I do NOT create it in target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e 2024-11-21T00:18:24,985 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/efe8d73f-c551-a32a-e962-51726e39e08c/hadoop.tmp.dir Erasing configuration value by system value. 
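The ResourceChecker summary above brackets each test with counts of live threads, open file descriptors, system load and free memory, and flags a possible leak when the "after" value exceeds the "before" value (here both Thread and OpenFileDescriptor grew). Below is a minimal, hypothetical sketch of that kind of before/after accounting using plain JMX; it is not HBase's actual ResourceChecker, and the UnixOperatingSystemMXBean cast assumes a Unix JVM like the Linux one in this run.

    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;
    import java.lang.management.ThreadMXBean;

    public class LeakCheckSketch {
      public static void main(String[] args) {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();

        int threadsBefore = threads.getThreadCount();
        long fdsBefore = openFds(os);

        // ... test body would run here ...

        int threadsAfter = threads.getThreadCount();
        long fdsAfter = openFds(os);

        // Same shape as the "Thread LEAK?" / "OpenFileDescriptor LEAK?" report above.
        System.out.printf("Thread=%d (was %d), OpenFileDescriptor=%d (was %d)%n",
            threadsAfter, threadsBefore, fdsAfter, fdsBefore);
      }

      // getOpenFileDescriptorCount() is only exposed on Unix-like JVMs.
      private static long openFds(OperatingSystemMXBean os) {
        if (os instanceof com.sun.management.UnixOperatingSystemMXBean) {
          return ((com.sun.management.UnixOperatingSystemMXBean) os).getOpenFileDescriptorCount();
        }
        return -1;
      }
    }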
2024-11-21T00:18:24,985 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e 2024-11-21T00:18:24,985 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5, deleteOnExit=true 2024-11-21T00:18:24,987 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5/zookeeper_0, clientPort=63439, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:18:24,988 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63439 2024-11-21T00:18:24,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:18:24,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:18:24,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/test.cache.data in system properties and HBase conf 2024-11-21T00:18:24,988 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/mapreduce.cluster.temp.dir in system properties and HBase conf 
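The StartMiniClusterOption reported above (numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1) is what a test passes to the testing utility before the DFS, ZooKeeper and HBase daemons in the rest of this log come up. A hedged sketch of that call using the branch-3 testing classes named in the log (HBaseTestingUtil, StartMiniClusterOption); it is not the actual TestMasterReplication setup code.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the option string logged by HBaseTestingUtil(805) above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .build();
        util.startMiniCluster(option);   // brings up mini ZK, mini DFS, one master, one region server
        try {
          // test body would go here
        } finally {
          util.shutdownMiniCluster();    // tears the cluster down and removes the test-data dirs
        }
      }
    }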
2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:18:24,989 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:18:24,989 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/nfs.dump.dir in system 
properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:18:24,990 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:18:25,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015ac108c20005, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:18:25,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015ac108c20002, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:18:25,036 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015ac108c20005, quorum=127.0.0.1:50128, baseZNode=/1-1336725220 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:18:25,036 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015ac108c20002, quorum=127.0.0.1:50128, baseZNode=/0857133414 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:18:25,338 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-21T00:18:25,340 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:25,362 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:25,387 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:25,392 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:25,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:25,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:25,394 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:18:25,395 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:25,396 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4343dd3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:25,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aae1200{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:25,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7597814{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/java.io.tmpdir/jetty-localhost-40177-hadoop-hdfs-3_4_1-tests_jar-_-any-12334670974976637183/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:25,532 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b56bf5{HTTP/1.1, (http/1.1)}{localhost:40177} 2024-11-21T00:18:25,532 INFO [Time-limited test {}] server.Server(415): Started @40081ms 2024-11-21T00:18:25,904 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:25,907 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:25,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:25,908 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:25,908 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:18:25,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1474973e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:25,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10fb0c59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:26,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4828004d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/java.io.tmpdir/jetty-localhost-43953-hadoop-hdfs-3_4_1-tests_jar-_-any-11562386740670554995/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:26,008 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d554b36{HTTP/1.1, (http/1.1)}{localhost:43953} 2024-11-21T00:18:26,008 INFO [Time-limited test {}] server.Server(415): Started @40557ms 2024-11-21T00:18:26,009 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
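The repeated "Unable to initialize FileSignerSecretProvider" warnings are expected here: the default Hadoop HTTP signature secret file resolves under the user's home directory (hence /home/jenkins/hadoop-http-auth-signature-secret), it does not exist on the build host, and the embedded Jetty servers fall back to random signing secrets, which is harmless for a mini cluster. A hedged sketch of how a real deployment could point the corresponding property at an existing secret file; the path below is a made-up example.

    import org.apache.hadoop.conf.Configuration;

    public class HttpAuthSecretSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical path; the property is the standard Hadoop HTTP auth setting whose
        // default points at ${user.home}/hadoop-http-auth-signature-secret.
        conf.set("hadoop.http.authentication.signature.secret.file",
            "/etc/hadoop/conf/http-auth-signature-secret");
        System.out.println(conf.get("hadoop.http.authentication.signature.secret.file"));
      }
    }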
2024-11-21T00:18:26,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:26,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:18:26,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:18:26,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:27,200 WARN [Thread-501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5/data/data2/current/BP-1011016395-172.17.0.2-1732148305013/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:27,200 WARN [Thread-500 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5/data/data1/current/BP-1011016395-172.17.0.2-1732148305013/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:27,221 WARN [Thread-488 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T00:18:27,224 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1cc0e53f010120b7 with lease ID 0xf92945647acfc40a: Processing first storage report for DS-134b1c38-75d0-4fc3-a900-cf185ec4c3a6 from datanode DatanodeRegistration(127.0.0.1:42325, datanodeUuid=e821f767-cf8e-40fa-869b-0c8d25e1e9c4, infoPort=46535, infoSecurePort=0, ipcPort=42285, storageInfo=lv=-57;cid=testClusterID;nsid=1842974383;c=1732148305013) 2024-11-21T00:18:27,224 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1cc0e53f010120b7 with lease ID 0xf92945647acfc40a: from storage DS-134b1c38-75d0-4fc3-a900-cf185ec4c3a6 node DatanodeRegistration(127.0.0.1:42325, datanodeUuid=e821f767-cf8e-40fa-869b-0c8d25e1e9c4, infoPort=46535, infoSecurePort=0, ipcPort=42285, storageInfo=lv=-57;cid=testClusterID;nsid=1842974383;c=1732148305013), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:27,225 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1cc0e53f010120b7 with lease ID 0xf92945647acfc40a: Processing first storage report for DS-fd3ce0ef-747e-48ea-82c7-4349d68bd0b7 from datanode DatanodeRegistration(127.0.0.1:42325, datanodeUuid=e821f767-cf8e-40fa-869b-0c8d25e1e9c4, infoPort=46535, infoSecurePort=0, ipcPort=42285, storageInfo=lv=-57;cid=testClusterID;nsid=1842974383;c=1732148305013) 2024-11-21T00:18:27,225 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1cc0e53f010120b7 with lease ID 0xf92945647acfc40a: from storage DS-fd3ce0ef-747e-48ea-82c7-4349d68bd0b7 node DatanodeRegistration(127.0.0.1:42325, datanodeUuid=e821f767-cf8e-40fa-869b-0c8d25e1e9c4, infoPort=46535, infoSecurePort=0, ipcPort=42285, storageInfo=lv=-57;cid=testClusterID;nsid=1842974383;c=1732148305013), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:27,241 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e 2024-11-21T00:18:27,242 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:27,243 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:27,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:18:27,657 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182 with version=8 2024-11-21T00:18:27,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/hbase-staging 2024-11-21T00:18:27,659 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:27,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:27,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:27,659 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:27,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:27,659 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:27,659 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:18:27,659 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:27,660 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39345 2024-11-21T00:18:27,661 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39345 connecting to ZooKeeper ensemble=127.0.0.1:63439 2024-11-21T00:18:27,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:393450x0, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:27,708 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39345-0x1015ac198060000 connected 2024-11-21T00:18:27,843 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:27,845 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:27,847 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on znode that does not yet exist, /0-1278390213/running 2024-11-21T00:18:27,847 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182, hbase.cluster.distributed=false 2024-11-21T00:18:27,849 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on znode that does not yet exist, /0-1278390213/acl 2024-11-21T00:18:27,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39345 2024-11-21T00:18:27,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39345 2024-11-21T00:18:27,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39345 2024-11-21T00:18:27,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39345 2024-11-21T00:18:27,853 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39345 2024-11-21T00:18:27,876 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:27,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:27,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:27,876 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:27,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:27,876 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:27,876 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:18:27,877 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:27,879 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33987 2024-11-21T00:18:27,881 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33987 connecting to ZooKeeper ensemble=127.0.0.1:63439 2024-11-21T00:18:27,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:27,883 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:27,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:339870x0, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:27,896 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:339870x0, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on znode that does not yet exist, /0-1278390213/running 2024-11-21T00:18:27,896 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33987-0x1015ac198060001 connected 2024-11-21T00:18:27,897 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:18:27,900 DEBUG [Time-limited test {}] mob.MobFileCache(123): 
MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:18:27,901 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on znode that does not yet exist, /0-1278390213/master 2024-11-21T00:18:27,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on znode that does not yet exist, /0-1278390213/acl 2024-11-21T00:18:27,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33987 2024-11-21T00:18:27,907 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33987 2024-11-21T00:18:27,909 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33987 2024-11-21T00:18:27,910 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33987 2024-11-21T00:18:27,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33987 2024-11-21T00:18:27,925 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:39345 2024-11-21T00:18:27,925 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0-1278390213/backup-masters/5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:27,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213/backup-masters 2024-11-21T00:18:27,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213/backup-masters 2024-11-21T00:18:27,938 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on existing znode=/0-1278390213/backup-masters/5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:27,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-1278390213/master 2024-11-21T00:18:27,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:27,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:27,948 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on existing 
znode=/0-1278390213/master 2024-11-21T00:18:27,949 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0-1278390213/backup-masters/5ed4808ef0e6,39345,1732148307659 from backup master directory 2024-11-21T00:18:27,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213/backup-masters 2024-11-21T00:18:27,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-1278390213/backup-masters/5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:27,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213/backup-masters 2024-11-21T00:18:27,958 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:18:27,958 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:27,963 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/hbase.id] with ID: 86350b17-0c49-45fe-8c98-dea9d4adeacb 2024-11-21T00:18:27,963 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/.tmp/hbase.id 2024-11-21T00:18:27,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:18:28,372 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/.tmp/hbase.id]:[hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/hbase.id] 2024-11-21T00:18:28,385 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:28,385 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:18:28,387 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
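The FSUtils lines above show the write-then-rename pattern used to publish the cluster ID: the ID is written under .tmp/hbase.id first and then moved onto hbase.id, so a reader never observes a half-written file. A minimal sketch of that pattern with the plain Hadoop FileSystem API; the root path below is a placeholder, and this is not HBase's FSUtils implementation, only the same idea.

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdPublishSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path rootDir = new Path("/user/jenkins/test-data/example");   // placeholder root dir
        Path tmpId = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // 1. Write the ID (the value reported in the log above) to a temporary location.
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
          out.write("86350b17-0c49-45fe-8c98-dea9d4adeacb".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename it into place so readers only ever see a complete file.
        if (!fs.rename(tmpId, finalId)) {
          throw new java.io.IOException("rename failed: " + tmpId + " -> " + finalId);
        }
      }
    }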
2024-11-21T00:18:28,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:28,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:28,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:18:28,849 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:28,850 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:18:28,850 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:28,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:18:29,259 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store 2024-11-21T00:18:29,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:18:29,667 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:29,668 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:29,668 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:29,668 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:29,668 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:29,668 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:29,668 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
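The master:store schema logged above (families info, proc, rs and state, with info held in memory, 3 versions, ROW_INDEX_V1 block encoding, ROWCOL bloom filter and an 8 KB block size) maps onto the public descriptor-builder API. The snippet below illustrates only the 'info' family as a hedged example; it is not the MasterRegion code that actually builds this table.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes reported in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();

        System.out.println(desc);
      }
    }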
2024-11-21T00:18:29,668 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148309668Disabling compacts and flushes for region at 1732148309668Disabling writes for close at 1732148309668Writing region close event to WAL at 1732148309668Closed at 1732148309668 2024-11-21T00:18:29,669 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/.initializing 2024-11-21T00:18:29,670 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/WALs/5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:29,671 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:29,674 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C39345%2C1732148307659, suffix=, logDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/WALs/5ed4808ef0e6,39345,1732148307659, archiveDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/oldWALs, maxLogs=10 2024-11-21T00:18:29,691 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/WALs/5ed4808ef0e6,39345,1732148307659/5ed4808ef0e6%2C39345%2C1732148307659.1732148309674, exclude list is [], retry=0 2024-11-21T00:18:29,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42325,DS-134b1c38-75d0-4fc3-a900-cf185ec4c3a6,DISK] 2024-11-21T00:18:29,707 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/WALs/5ed4808ef0e6,39345,1732148307659/5ed4808ef0e6%2C39345%2C1732148307659.1732148309674 2024-11-21T00:18:29,712 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46535:46535)] 2024-11-21T00:18:29,712 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:29,713 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:29,714 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,714 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,732 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:18:29,753 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:29,760 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:29,760 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:18:29,771 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:29,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:29,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:18:29,778 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:29,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:29,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:18:29,781 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:29,782 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:29,782 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,783 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,784 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,786 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,786 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,787 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:29,788 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:29,792 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:29,793 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60874644, jitterRate=-0.09289711713790894}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:29,793 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148309714Initializing all the Stores at 1732148309720 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148309720Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148309728 (+8 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148309728Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148309728Cleaning up temporary data from old regions at 1732148309786 (+58 ms)Region opened successfully at 1732148309793 (+7 ms) 2024-11-21T00:18:29,794 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:18:29,798 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ce67940, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:29,799 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:18:29,799 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:18:29,800 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:18:29,800 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:18:29,801 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:18:29,801 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:18:29,801 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:18:29,806 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
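The ZKUtil and RecoverableZooKeeper lines below show the new active master probing optional znodes (balancer, normalizer, switch/split, switch/merge, snapshot-cleanup) under its base znode /0-1278390213; "node does not exist (not necessarily an error)" is the normal result on a freshly created cluster. A minimal sketch of that kind of existence probe using the plain ZooKeeper client API; the quorum and znode names are copied from this log, and the watcher is a no-op placeholder.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and base znode as reported in this log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63439", 30000, new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            // no-op placeholder watcher
          }
        });
        try {
          String baseZNode = "/0-1278390213";
          for (String child : new String[] { "/balancer", "/normalizer", "/switch/split" }) {
            Stat stat = zk.exists(baseZNode + child, false);
            // A null Stat just means the optional znode has not been created yet.
            System.out.println(baseZNode + child + " exists=" + (stat != null));
          }
        } finally {
          zk.close();
        }
      }
    }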
2024-11-21T00:18:29,807 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Unable to get data of znode /0-1278390213/balancer because node does not exist (not necessarily an error) 2024-11-21T00:18:29,810 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-1278390213/balancer already deleted, retry=false 2024-11-21T00:18:29,811 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:18:29,812 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Unable to get data of znode /0-1278390213/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:18:29,897 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-1278390213/normalizer already deleted, retry=false 2024-11-21T00:18:29,898 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:18:29,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Unable to get data of znode /0-1278390213/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:18:29,926 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-1278390213/switch/split already deleted, retry=false 2024-11-21T00:18:29,928 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Unable to get data of znode /0-1278390213/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:18:29,937 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-1278390213/switch/merge already deleted, retry=false 2024-11-21T00:18:29,939 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Unable to get data of znode /0-1278390213/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:18:29,947 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-1278390213/snapshot-cleanup already deleted, retry=false 2024-11-21T00:18:29,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-1278390213/running 2024-11-21T00:18:29,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-1278390213/running 2024-11-21T00:18:29,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:29,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:29,959 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,39345,1732148307659, sessionid=0x1015ac198060000, setting cluster-up flag (Was=false) 2024-11-21T00:18:29,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:29,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:30,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-1278390213/flush-table-proc/acquired, /0-1278390213/flush-table-proc/reached, /0-1278390213/flush-table-proc/abort 2024-11-21T00:18:30,147 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:30,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:30,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:30,211 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-1278390213/online-snapshot/acquired, /0-1278390213/online-snapshot/reached, /0-1278390213/online-snapshot/abort 2024-11-21T00:18:30,212 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:30,213 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:18:30,215 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:30,215 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:18:30,215 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, 
StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:18:30,216 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,39345,1732148307659 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:18:30,217 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(746): ClusterId : 86350b17-0c49-45fe-8c98-dea9d4adeacb 2024-11-21T00:18:30,217 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:18:30,217 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:30,217 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:30,217 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:30,217 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:30,217 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:18:30,217 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,218 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:30,218 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,218 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148340218 2024-11-21T00:18:30,218 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:18:30,218 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:18:30,219 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:18:30,219 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:18:30,219 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:30,220 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:18:30,220 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:18:30,220 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:18:30,220 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148310220,5,FailOnTimeoutGroup] 2024-11-21T00:18:30,220 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148310220,5,FailOnTimeoutGroup] 2024-11-21T00:18:30,220 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:18:30,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,221 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:30,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
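Annotator's note: the ChoreService entries above enable the LogsCleaner and HFileCleaner chores with a 600000 ms period and the SnapshotCleaner with a 1800000 ms period. The following stand-alone Java snippet is only an analogue of that periodic-chore pattern built on java.util.concurrent, not HBase's ChoreService/ScheduledChore API; the task names and periods are copied from the log.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class ChoreSchedulingSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(2);

        // Periods mirror the log: LogsCleaner/HFileCleaner every 600000 ms,
        // SnapshotCleaner every 1800000 ms.
        chores.scheduleAtFixedRate(() -> System.out.println("LogsCleaner run"),
            0, 600_000, TimeUnit.MILLISECONDS);
        chores.scheduleAtFixedRate(() -> System.out.println("HFileCleaner run"),
            0, 600_000, TimeUnit.MILLISECONDS);
        chores.scheduleAtFixedRate(() -> System.out.println("SnapshotCleaner run"),
            0, 1_800_000, TimeUnit.MILLISECONDS);

        // In a real server these keep running; here each task ticks once and the pool exits.
        Thread.sleep(1_000);
        chores.shutdownNow();
      }
    }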
2024-11-21T00:18:30,221 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:18:30,222 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:18:30,222 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:18:30,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:18:30,232 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:18:30,233 DEBUG [RS:0;5ed4808ef0e6:33987 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@444c7ef0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:30,244 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:33987 2024-11-21T00:18:30,244 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:18:30,244 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:18:30,244 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(832): About to register with Master. 
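Annotator's note: the FSTableDescriptors entry above writes the hbase:meta descriptor whose 'info' family uses BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', IN_MEMORY => 'true', VERSIONS => '3' and an 8192-byte block size, plus the MultiRowMutationEndpoint coprocessor. As a hedged sketch, the same attributes expressed through the public HBase client builder API for an ordinary user table (the table name "demo_meta_like" is invented for illustration; hbase:meta itself is created by the master, never by client code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLikeTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // expects hbase-site.xml on the classpath

        // Mirror the 'info' family attributes from the descriptor logged above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();

        TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_meta_like"))
            .setColumnFamily(info)
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(table);
        }
      }
    }

Running this requires the hbase-client dependency and a reachable cluster such as the minicluster in this log (version 3.0.0-beta-2-SNAPSHOT here).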
2024-11-21T00:18:30,245 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,39345,1732148307659 with port=33987, startcode=1732148307875 2024-11-21T00:18:30,245 DEBUG [RS:0;5ed4808ef0e6:33987 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:18:30,247 INFO [HMaster-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52451, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:18:30,248 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39345 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:30,248 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39345 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:30,250 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182 2024-11-21T00:18:30,250 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36001 2024-11-21T00:18:30,250 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:18:30,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213/rs 2024-11-21T00:18:30,263 DEBUG [RS:0;5ed4808ef0e6:33987 {}] zookeeper.ZKUtil(111): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on existing znode=/0-1278390213/rs/5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:30,263 WARN [RS:0;5ed4808ef0e6:33987 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:18:30,264 INFO [RS:0;5ed4808ef0e6:33987 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:30,264 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,33987,1732148307875] 2024-11-21T00:18:30,264 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:30,271 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:18:30,274 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:18:30,281 INFO [RS:0;5ed4808ef0e6:33987 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:18:30,282 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
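Annotator's note: the MemStoreFlusher line above reports globalMemStoreLimit=880 M with globalMemStoreLimitLowMark=836 M, i.e. the low-water mark is 95% of the limit, which is consistent with the default hbase.regionserver.global.memstore.size.lower.limit of 0.95. The 880 M itself would be the configured fraction of the region server heap (0.4 by default), but since the heap size is not shown in the log that part is an assumption. A tiny pure-Java check of the relationship that is visible:

    public final class MemStoreLimitSketch {
      public static void main(String[] args) {
        long globalLimitMb = 880;          // globalMemStoreLimit from the log
        double lowerLimitFraction = 0.95;  // assumed default for
                                           // hbase.regionserver.global.memstore.size.lower.limit

        long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
        System.out.println("lowMark=" + lowMarkMb + " M"); // prints 836 M, matching the log
      }
    }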
2024-11-21T00:18:30,282 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:18:30,289 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:18:30,289 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,289 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,289 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,289 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:30,290 DEBUG [RS:0;5ed4808ef0e6:33987 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:30,292 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
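Annotator's note: the ExecutorService entries above start a set of small, fixed, named worker pools (e.g. RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1; RS_LOG_REPLAY_OPS with 2; RS_SNAPSHOT_OPERATIONS with 3). As an illustration of that pattern only, and not of HBase's own ExecutorService class, here is a plain-Java single-threaded named pool analogous to the RS_OPEN_REGION executor:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.atomic.AtomicInteger;

    public final class RegionOpenPoolSketch {
      public static void main(String[] args) {
        // A single-threaded, named pool, analogous to the RS_OPEN_REGION executor
        // logged with corePoolSize=1, maxPoolSize=1.
        ThreadFactory named = new ThreadFactory() {
          private final AtomicInteger seq = new AtomicInteger();
          @Override public Thread newThread(Runnable r) {
            return new Thread(r, "RS_OPEN_REGION-" + seq.incrementAndGet());
          }
        };
        ExecutorService openRegionPool = Executors.newFixedThreadPool(1, named);

        openRegionPool.submit(() ->
            System.out.println(Thread.currentThread().getName() + ": open region task"));
        openRegionPool.shutdown();
      }
    }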
2024-11-21T00:18:30,292 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,292 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,292 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,292 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,292 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33987,1732148307875-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:30,313 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:18:30,313 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33987,1732148307875-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,314 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,314 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.Replication(171): 5ed4808ef0e6,33987,1732148307875 started 2024-11-21T00:18:30,335 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:30,335 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,33987,1732148307875, RpcServer on 5ed4808ef0e6/172.17.0.2:33987, sessionid=0x1015ac198060001 2024-11-21T00:18:30,335 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:18:30,335 DEBUG [RS:0;5ed4808ef0e6:33987 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:30,335 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33987,1732148307875' 2024-11-21T00:18:30,335 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-1278390213/flush-table-proc/abort' 2024-11-21T00:18:30,336 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-1278390213/flush-table-proc/acquired' 2024-11-21T00:18:30,336 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:18:30,336 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:18:30,336 DEBUG [RS:0;5ed4808ef0e6:33987 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:30,337 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33987,1732148307875' 2024-11-21T00:18:30,337 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-1278390213/online-snapshot/abort' 
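Annotator's note: the ZKProcedureMemberRpcs entries above check the .../flush-table-proc/abort and .../online-snapshot/abort znodes for aborted procedures and look for new procedures under the matching .../acquired znodes. A hedged sketch of that kind of check using the plain Apache ZooKeeper client rather than HBase's ZKProcedureMemberRpcs; the quorum address and znode paths are copied from this log.

    import java.util.List;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;

    public final class ProcedureZNodeCheckSketch {
      public static void main(String[] args) throws Exception {
        // Quorum and base znode as reported in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63439", 30_000, event -> { });
        String base = "/0-1278390213/flush-table-proc";

        try {
          // Any children under .../acquired are procedures waiting to be joined.
          List<String> acquired = zk.getChildren(base + "/acquired", false);
          System.out.println("pending procedures: " + acquired);

          // The member also checks .../abort; missing or empty means nothing was aborted.
          if (zk.exists(base + "/abort", false) != null) {
            System.out.println("aborted procedures: " + zk.getChildren(base + "/abort", false));
          }
        } catch (KeeperException.NoNodeException e) {
          // Matches the "node does not exist (not necessarily an error)" messages earlier in the log.
          System.out.println("znode not present yet: " + e.getPath());
        } finally {
          zk.close();
        }
      }
    }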
2024-11-21T00:18:30,337 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-1278390213/online-snapshot/acquired' 2024-11-21T00:18:30,337 DEBUG [RS:0;5ed4808ef0e6:33987 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:18:30,337 INFO [RS:0;5ed4808ef0e6:33987 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:18:30,338 INFO [RS:0;5ed4808ef0e6:33987 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:18:30,438 INFO [RS:0;5ed4808ef0e6:33987 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:30,440 INFO [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33987%2C1732148307875, suffix=, logDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875, archiveDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/oldWALs, maxLogs=10 2024-11-21T00:18:30,456 DEBUG [RS:0;5ed4808ef0e6:33987 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440, exclude list is [], retry=0 2024-11-21T00:18:30,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42325,DS-134b1c38-75d0-4fc3-a900-cf185ec4c3a6,DISK] 2024-11-21T00:18:30,463 INFO [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 2024-11-21T00:18:30,463 DEBUG [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46535:46535)] 2024-11-21T00:18:30,631 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:18:30,632 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS 
=> '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182 2024-11-21T00:18:30,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:18:31,039 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:31,041 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:31,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:31,042 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:31,044 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:31,044 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:31,046 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:31,046 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:31,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:31,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,048 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:31,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740 2024-11-21T00:18:31,050 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740 2024-11-21T00:18:31,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:31,051 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:31,052 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:31,053 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:31,055 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:31,056 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65189703, jitterRate=-0.028597727417945862}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:31,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148311039Initializing all the Stores at 1732148311040 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148311040Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148311040Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148311041 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148311041Cleaning up temporary data from old regions at 1732148311051 (+10 ms)Region opened successfully at 1732148311057 (+6 ms) 2024-11-21T00:18:31,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:31,057 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:31,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:31,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:31,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:31,058 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:31,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148311057Disabling compacts and flushes for region at 1732148311057Disabling writes for close at 1732148311057Writing region close event to WAL at 1732148311058 (+1 ms)Closed at 1732148311058 2024-11-21T00:18:31,059 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:31,059 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:18:31,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:18:31,061 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:31,062 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:18:31,212 DEBUG [5ed4808ef0e6:39345 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:18:31,213 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:31,214 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33987,1732148307875, state=OPENING 2024-11-21T00:18:31,232 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:18:31,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:31,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, 
baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:31,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-1278390213/meta-region-server: CHANGED 2024-11-21T00:18:31,273 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-1278390213/meta-region-server: CHANGED 2024-11-21T00:18:31,273 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:31,273 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33987,1732148307875}] 2024-11-21T00:18:31,426 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:18:31,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48973, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:18:31,432 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:18:31,432 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:31,433 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:18:31,435 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33987%2C1732148307875.meta, suffix=.meta, logDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875, archiveDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/oldWALs, maxLogs=10 2024-11-21T00:18:31,448 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.meta.1732148311435.meta, exclude list is [], retry=0 2024-11-21T00:18:31,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42325,DS-134b1c38-75d0-4fc3-a900-cf185ec4c3a6,DISK] 2024-11-21T00:18:31,453 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.meta.1732148311435.meta 2024-11-21T00:18:31,453 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46535:46535)] 2024-11-21T00:18:31,454 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:31,454 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:31,454 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:31,454 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:18:31,454 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:18:31,454 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:18:31,454 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:31,455 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:18:31,455 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:18:31,456 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:31,458 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:31,458 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,458 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:31,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:31,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,460 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:31,461 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:31,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:31,462 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:31,462 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:31,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:31,463 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:31,464 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740 2024-11-21T00:18:31,465 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740 2024-11-21T00:18:31,466 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:31,466 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:31,467 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
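Annotator's note: each HRegion "Opened ...; SteppingSplitPolicy..." summary in this log (for the master store above, and for hbase:meta above and again just below) pairs a desiredMaxFileSize with a jitterRate, and in every case desiredMaxFileSize equals a 67108864-byte (64 MB) base scaled by (1 + jitterRate); the 64 MB base is inferred from the numbers (presumably the hbase.hregion.max.filesize configured for this test) rather than printed directly, so treat it as an assumption. A small pure-Java check:

    public final class SplitSizeJitterSketch {
      public static void main(String[] args) {
        long baseMaxFileSize = 67_108_864L; // 64 MB, inferred from the logged values

        // jitterRate values copied from the HRegion "Opened ..." lines in this log.
        double[] jitterRates = {-0.09289711713790894, -0.028597727417945862, 0.05984792113304138};
        for (double jitter : jitterRates) {
          long desired = (long) (baseMaxFileSize * (1.0 + jitter));
          // Prints ~60874644, ~65189703 and ~71125190, matching the desiredMaxFileSize values above.
          System.out.println("jitterRate=" + jitter + " -> desiredMaxFileSize=" + desired);
        }
      }
    }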
2024-11-21T00:18:31,468 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:31,469 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71125190, jitterRate=0.05984792113304138}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:31,469 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:18:31,470 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148311455Writing region info on filesystem at 1732148311455Initializing all the Stores at 1732148311456 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148311456Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148311456Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148311456Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148311456Cleaning up temporary data from old regions at 1732148311466 (+10 ms)Running coprocessor post-open hooks at 1732148311469 (+3 ms)Region opened successfully at 1732148311470 (+1 ms) 2024-11-21T00:18:31,471 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148311426 2024-11-21T00:18:31,474 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:18:31,474 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:18:31,475 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:31,476 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33987,1732148307875, state=OPEN 2024-11-21T00:18:31,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-1278390213/meta-region-server 2024-11-21T00:18:31,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-1278390213/meta-region-server 2024-11-21T00:18:31,495 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:31,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-1278390213/meta-region-server: CHANGED 2024-11-21T00:18:31,495 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-1278390213/meta-region-server: CHANGED 2024-11-21T00:18:31,498 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:18:31,498 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33987,1732148307875 in 222 msec 2024-11-21T00:18:31,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:18:31,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 439 msec 2024-11-21T00:18:31,502 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:31,502 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:18:31,504 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:31,504 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33987,1732148307875, seqNum=-1] 2024-11-21T00:18:31,504 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:31,506 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49097, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:31,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2970 sec 2024-11-21T00:18:31,514 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148311514, completionTime=-1 
2024-11-21T00:18:31,514 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:18:31,514 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:18:31,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:18:31,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148371516 2024-11-21T00:18:31,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148431516 2024-11-21T00:18:31,517 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:18:31,517 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,39345,1732148307659-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:31,517 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,39345,1732148307659-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:31,517 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,39345,1732148307659-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:31,517 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:39345, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:31,517 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:31,517 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:31,519 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.563sec 2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,39345,1732148307659-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:31,522 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,39345,1732148307659-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:18:31,525 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:18:31,525 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:18:31,525 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,39345,1732148307659-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:31,618 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31416843, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:31,618 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,39345,-1 for getting cluster id 2024-11-21T00:18:31,618 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:31,619 DEBUG [HMaster-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '86350b17-0c49-45fe-8c98-dea9d4adeacb' 2024-11-21T00:18:31,619 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:31,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "86350b17-0c49-45fe-8c98-dea9d4adeacb" 2024-11-21T00:18:31,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f3f7aea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:31,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,39345,-1] 2024-11-21T00:18:31,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:31,620 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:31,621 INFO [HMaster-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36590, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:31,622 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62ad3d06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:31,623 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:31,624 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33987,1732148307875, seqNum=-1] 2024-11-21T00:18:31,625 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:31,626 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41710, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:31,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:31,629 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:63439 2024-11-21T00:18:31,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:31,643 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015ac198060002 connected 2024-11-21T00:18:31,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.log.dir so I do NOT create it in target/test-data/9f038e24-5ca5-c776-8283-7f880668eced 2024-11-21T00:18:31,656 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:18:31,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.tmp.dir so I do NOT create it in target/test-data/9f038e24-5ca5-c776-8283-7f880668eced 2024-11-21T00:18:31,656 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.tmp.dir Erasing configuration value by system value. 
2024-11-21T00:18:31,656 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced 2024-11-21T00:18:31,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:18:31,656 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/cluster_94bdb889-0d52-f7af-62c1-ee7d393540cc, deleteOnExit=true 2024-11-21T00:18:31,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/test.cache.data in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:18:31,657 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:31,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:18:31,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:18:31,811 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:31,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:31,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:32,037 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:32,044 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:32,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:32,056 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:32,056 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:18:32,058 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:32,060 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@201103c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:32,061 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3833aa3c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:32,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@195f4a04{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/java.io.tmpdir/jetty-localhost-37363-hadoop-hdfs-3_4_1-tests_jar-_-any-11921660584766260386/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:32,165 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@54197af4{HTTP/1.1, (http/1.1)}{localhost:37363} 2024-11-21T00:18:32,165 INFO [Time-limited test {}] server.Server(415): Started @46714ms 2024-11-21T00:18:32,447 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:32,451 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:32,452 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:32,452 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:32,452 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:18:32,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cbdedc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:32,454 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15c2233d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:32,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11a14b29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/java.io.tmpdir/jetty-localhost-39641-hadoop-hdfs-3_4_1-tests_jar-_-any-1550851327085594880/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:32,559 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7f7bfbaf{HTTP/1.1, (http/1.1)}{localhost:39641} 2024-11-21T00:18:32,559 INFO [Time-limited test {}] server.Server(415): Started @47108ms 2024-11-21T00:18:32,560 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:18:33,279 WARN [Thread-621 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/cluster_94bdb889-0d52-f7af-62c1-ee7d393540cc/data/data1/current/BP-1743846537-172.17.0.2-1732148311681/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:33,279 WARN [Thread-622 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/cluster_94bdb889-0d52-f7af-62c1-ee7d393540cc/data/data2/current/BP-1743846537-172.17.0.2-1732148311681/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:33,315 WARN [RS:0;5ed4808ef0e6:33987 {}] monitoring.TaskMonitor(166): Status Processing ServerCrashProcedure of 5ed4808ef0e6,44897,1732148276946: status=Processing ServerCrashProcedure of 5ed4808ef0e6,44897,1732148276946 current State SERVER_CRASH_CLAIM_REPLICATION_QUEUES, state=RUNNING, startTime=1732148295311, completionTime=-1 appears to have been leaked 2024-11-21T00:18:33,319 WARN [Thread-609 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:18:33,321 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x638862f02e45aad2 with lease ID 0x217fd965d4b76524: Processing first storage report for DS-9e34bbd6-8e12-485f-8338-796225595356 from datanode DatanodeRegistration(127.0.0.1:34603, datanodeUuid=a1c2f001-7a46-46bb-8143-15713f2f69a9, infoPort=43945, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=958379323;c=1732148311681) 2024-11-21T00:18:33,321 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x638862f02e45aad2 with lease ID 0x217fd965d4b76524: from storage DS-9e34bbd6-8e12-485f-8338-796225595356 node DatanodeRegistration(127.0.0.1:34603, datanodeUuid=a1c2f001-7a46-46bb-8143-15713f2f69a9, infoPort=43945, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=958379323;c=1732148311681), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:33,322 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x638862f02e45aad2 with lease ID 0x217fd965d4b76524: Processing first storage report for DS-f6831f8c-9b70-47b1-82d3-703f3e731801 from datanode DatanodeRegistration(127.0.0.1:34603, datanodeUuid=a1c2f001-7a46-46bb-8143-15713f2f69a9, infoPort=43945, infoSecurePort=0, ipcPort=44325, storageInfo=lv=-57;cid=testClusterID;nsid=958379323;c=1732148311681) 2024-11-21T00:18:33,322 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x638862f02e45aad2 with lease ID 0x217fd965d4b76524: from storage DS-f6831f8c-9b70-47b1-82d3-703f3e731801 node DatanodeRegistration(127.0.0.1:34603, datanodeUuid=a1c2f001-7a46-46bb-8143-15713f2f69a9, infoPort=43945, infoSecurePort=0, ipcPort=44325, 
storageInfo=lv=-57;cid=testClusterID;nsid=958379323;c=1732148311681), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:33,409 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced 2024-11-21T00:18:33,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:33,411 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:33,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:18:33,820 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9 with version=8 2024-11-21T00:18:33,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/hbase-staging 2024-11-21T00:18:33,822 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:33,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:33,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:33,822 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:33,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:33,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:33,822 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:18:33,822 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:33,823 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33443 2024-11-21T00:18:33,824 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33443 connecting to ZooKeeper ensemble=127.0.0.1:63439 2024-11-21T00:18:33,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:334430x0, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-21T00:18:33,874 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33443-0x1015ac198060003 connected 2024-11-21T00:18:33,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:33,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:33,951 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on znode that does not yet exist, /1-1865054975/running 2024-11-21T00:18:33,951 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9, hbase.cluster.distributed=false 2024-11-21T00:18:33,952 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on znode that does not yet exist, /1-1865054975/acl 2024-11-21T00:18:33,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33443 2024-11-21T00:18:33,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33443 2024-11-21T00:18:33,953 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33443 2024-11-21T00:18:33,954 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33443 2024-11-21T00:18:33,954 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33443 2024-11-21T00:18:33,968 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:33,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:33,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:33,968 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:33,968 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:33,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:33,969 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 
2024-11-21T00:18:33,969 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:33,969 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46261 2024-11-21T00:18:33,970 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46261 connecting to ZooKeeper ensemble=127.0.0.1:63439 2024-11-21T00:18:33,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:33,972 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:33,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462610x0, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:33,979 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:462610x0, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on znode that does not yet exist, /1-1865054975/running 2024-11-21T00:18:33,979 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46261-0x1015ac198060004 connected 2024-11-21T00:18:33,980 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:18:33,980 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:18:33,981 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on znode that does not yet exist, /1-1865054975/master 2024-11-21T00:18:33,982 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on znode that does not yet exist, /1-1865054975/acl 2024-11-21T00:18:33,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46261 2024-11-21T00:18:33,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46261 2024-11-21T00:18:33,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46261 2024-11-21T00:18:33,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46261 2024-11-21T00:18:33,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46261 2024-11-21T00:18:33,997 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:33443 2024-11-21T00:18:33,998 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-1865054975/backup-masters/5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:34,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, 
baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975/backup-masters 2024-11-21T00:18:34,010 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975/backup-masters 2024-11-21T00:18:34,011 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on existing znode=/1-1865054975/backup-masters/5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:34,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1865054975/master 2024-11-21T00:18:34,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:34,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:34,021 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on existing znode=/1-1865054975/master 2024-11-21T00:18:34,022 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-1865054975/backup-masters/5ed4808ef0e6,33443,1732148313822 from backup master directory 2024-11-21T00:18:34,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1865054975/backup-masters/5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:34,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975/backup-masters 2024-11-21T00:18:34,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975/backup-masters 2024-11-21T00:18:34,031 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:18:34,031 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:34,036 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/hbase.id] with ID: 119ad92a-ca68-447b-84c6-511bf72b6cc6 2024-11-21T00:18:34,036 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/.tmp/hbase.id 2024-11-21T00:18:34,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:18:34,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/.tmp/hbase.id]:[hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/hbase.id] 2024-11-21T00:18:34,456 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:34,456 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:18:34,458 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-11-21T00:18:34,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:34,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:34,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:18:34,891 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:34,892 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:18:34,892 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:34,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:18:35,306 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store 2024-11-21T00:18:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:18:35,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:35,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:35,315 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:18:35,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:35,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:35,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:35,315 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:35,316 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148315315Disabling compacts and flushes for region at 1732148315315Disabling writes for close at 1732148315315Writing region close event to WAL at 1732148315315Closed at 1732148315315 2024-11-21T00:18:35,317 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/.initializing 2024-11-21T00:18:35,317 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/WALs/5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:35,318 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:35,320 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C33443%2C1732148313822, suffix=, logDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/WALs/5ed4808ef0e6,33443,1732148313822, archiveDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/oldWALs, maxLogs=10 2024-11-21T00:18:35,336 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/WALs/5ed4808ef0e6,33443,1732148313822/5ed4808ef0e6%2C33443%2C1732148313822.1732148315320, exclude list is [], retry=0 2024-11-21T00:18:35,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34603,DS-9e34bbd6-8e12-485f-8338-796225595356,DISK] 2024-11-21T00:18:35,361 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/WALs/5ed4808ef0e6,33443,1732148313822/5ed4808ef0e6%2C33443%2C1732148313822.1732148315320 2024-11-21T00:18:35,372 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43945:43945)] 2024-11-21T00:18:35,372 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:35,373 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:35,373 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,373 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:18:35,380 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:35,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:35,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,385 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 
columnFamilyName proc 2024-11-21T00:18:35,385 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:35,389 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:35,389 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,391 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:18:35,391 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:35,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:35,392 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,395 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:18:35,395 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:35,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:35,396 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,398 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,399 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,401 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,401 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,402 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:35,404 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:35,410 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:35,411 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68838747, jitterRate=0.025777265429496765}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:35,411 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148315373Initializing all the Stores at 1732148315375 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148315375Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1732148315375Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148315375Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148315375Cleaning up temporary data from old regions at 1732148315401 (+26 ms)Region opened successfully at 1732148315411 (+10 ms) 2024-11-21T00:18:35,412 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:18:35,417 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ef7e8a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:35,418 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:18:35,419 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:18:35,419 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:18:35,419 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:18:35,420 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:18:35,420 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:18:35,420 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:18:35,442 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:18:35,444 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Unable to get data of znode /1-1865054975/balancer because node does not exist (not necessarily an error) 2024-11-21T00:18:35,452 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1865054975/balancer already deleted, retry=false 2024-11-21T00:18:35,453 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:18:35,454 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Unable to get data of znode /1-1865054975/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:18:35,463 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1865054975/normalizer already deleted, retry=false 2024-11-21T00:18:35,464 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:18:35,466 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Unable to get data of znode /1-1865054975/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:18:35,473 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1865054975/switch/split already deleted, retry=false 2024-11-21T00:18:35,475 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Unable to get data of znode /1-1865054975/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:18:35,494 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1865054975/switch/merge already deleted, retry=false 2024-11-21T00:18:35,498 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Unable to get data of znode /1-1865054975/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:18:35,528 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1865054975/snapshot-cleanup already deleted, retry=false 2024-11-21T00:18:35,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1865054975/running 2024-11-21T00:18:35,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1865054975/running 2024-11-21T00:18:35,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:35,537 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:35,539 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,33443,1732148313822, sessionid=0x1015ac198060003, setting cluster-up flag (Was=false) 2024-11-21T00:18:35,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:35,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:35,589 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1865054975/flush-table-proc/acquired, /1-1865054975/flush-table-proc/reached, /1-1865054975/flush-table-proc/abort 2024-11-21T00:18:35,594 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:35,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:35,642 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1865054975/online-snapshot/acquired, /1-1865054975/online-snapshot/reached, /1-1865054975/online-snapshot/abort 2024-11-21T00:18:35,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:35,648 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:18:35,658 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:35,658 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:18:35,659 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, 
MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:18:35,659 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,33443,1732148313822 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:18:35,673 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:35,674 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:35,674 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:35,674 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:35,674 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:18:35,678 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,678 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:35,678 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,689 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:35,689 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:18:35,691 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:35,691 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:18:35,721 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(746): ClusterId : 119ad92a-ca68-447b-84c6-511bf72b6cc6 2024-11-21T00:18:35,722 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:18:35,727 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:18:35,727 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:18:35,728 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148345728 2024-11-21T00:18:35,728 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:18:35,729 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:18:35,729 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:18:35,729 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:18:35,729 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:18:35,729 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:18:35,729 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:35,730 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:18:35,730 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:18:35,730 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:18:35,730 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:18:35,736 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:18:35,737 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:18:35,738 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:18:35,739 DEBUG [RS:0;5ed4808ef0e6:46261 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cbdefc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:35,748 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148315737,5,FailOnTimeoutGroup] 2024-11-21T00:18:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:18:35,772 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148315748,5,FailOnTimeoutGroup] 2024-11-21T00:18:35,772 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:18:35,772 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,772 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:18:35,772 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:35,773 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9 2024-11-21T00:18:35,773 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,788 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:46261 2024-11-21T00:18:35,788 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:18:35,788 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:18:35,788 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:18:35,790 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,33443,1732148313822 with port=46261, startcode=1732148313968 2024-11-21T00:18:35,790 DEBUG [RS:0;5ed4808ef0e6:46261 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:18:35,793 INFO [HMaster-EventLoopGroup-11-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50917, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:18:35,794 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33443 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:35,794 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33443 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:35,797 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9 2024-11-21T00:18:35,797 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40639 2024-11-21T00:18:35,797 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:18:35,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:18:35,861 DEBUG [RS:0;5ed4808ef0e6:46261 {}] zookeeper.ZKUtil(111): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on existing znode=/1-1865054975/rs/5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:35,861 WARN [RS:0;5ed4808ef0e6:46261 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:18:35,861 INFO [RS:0;5ed4808ef0e6:46261 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:35,862 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:35,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975/rs 2024-11-21T00:18:35,869 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,46261,1732148313968] 2024-11-21T00:18:35,878 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:18:35,885 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:18:35,897 INFO [RS:0;5ed4808ef0e6:46261 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:18:35,897 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,900 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:18:35,905 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:18:35,905 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:35,905 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:35,906 DEBUG [RS:0;5ed4808ef0e6:46261 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:35,920 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,920 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,920 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,920 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:35,921 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,921 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46261,1732148313968-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:35,947 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:18:35,947 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46261,1732148313968-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,948 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,948 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.Replication(171): 5ed4808ef0e6,46261,1732148313968 started 2024-11-21T00:18:35,971 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:35,971 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,46261,1732148313968, RpcServer on 5ed4808ef0e6/172.17.0.2:46261, sessionid=0x1015ac198060004 2024-11-21T00:18:35,971 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:18:35,971 DEBUG [RS:0;5ed4808ef0e6:46261 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:35,971 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,46261,1732148313968' 2024-11-21T00:18:35,971 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1865054975/flush-table-proc/abort' 2024-11-21T00:18:35,972 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1865054975/flush-table-proc/acquired' 2024-11-21T00:18:35,973 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:18:35,973 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:18:35,973 DEBUG [RS:0;5ed4808ef0e6:46261 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:35,973 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,46261,1732148313968' 2024-11-21T00:18:35,973 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1865054975/online-snapshot/abort' 2024-11-21T00:18:35,974 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1865054975/online-snapshot/acquired' 2024-11-21T00:18:35,975 DEBUG [RS:0;5ed4808ef0e6:46261 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:18:35,975 INFO [RS:0;5ed4808ef0e6:46261 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:18:35,975 INFO [RS:0;5ed4808ef0e6:46261 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:18:36,075 INFO [RS:0;5ed4808ef0e6:46261 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:36,077 INFO [RS:0;5ed4808ef0e6:46261 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C46261%2C1732148313968, suffix=, logDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968, archiveDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/oldWALs, maxLogs=10 2024-11-21T00:18:36,095 DEBUG [RS:0;5ed4808ef0e6:46261 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968/5ed4808ef0e6%2C46261%2C1732148313968.1732148316078, exclude list is [], retry=0 2024-11-21T00:18:36,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34603,DS-9e34bbd6-8e12-485f-8338-796225595356,DISK] 2024-11-21T00:18:36,107 INFO [RS:0;5ed4808ef0e6:46261 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968/5ed4808ef0e6%2C46261%2C1732148313968.1732148316078 2024-11-21T00:18:36,108 DEBUG [RS:0;5ed4808ef0e6:46261 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43945:43945)] 2024-11-21T00:18:36,218 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:36,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:36,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:36,227 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,228 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:36,228 INFO [StoreOpener-1588230740-1 
{}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:36,232 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:36,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:36,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:36,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:36,236 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:36,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:36,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:36,240 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:36,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:36,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740 2024-11-21T00:18:36,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740 2024-11-21T00:18:36,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:36,247 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:36,248 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:18:36,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:36,254 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:36,254 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73910475, jitterRate=0.10135190188884735}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:36,255 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148316219Initializing all the Stores at 1732148316220 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148316220Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148316224 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148316224Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148316224Cleaning up temporary data from old regions at 1732148316247 (+23 ms)Region opened successfully at 1732148316255 (+8 ms) 2024-11-21T00:18:36,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:36,256 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:36,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:36,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:36,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:36,261 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:36,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148316256Disabling compacts and flushes for region at 1732148316256Disabling writes for close at 
1732148316256Writing region close event to WAL at 1732148316261 (+5 ms)Closed at 1732148316261 2024-11-21T00:18:36,264 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:36,264 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:18:36,264 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:18:36,266 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:36,267 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:18:36,271 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:18:36,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:18:36,308 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T00:18:36,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:36,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver Metrics about HBase RegionObservers 2024-11-21T00:18:36,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:36,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T00:18:36,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:18:36,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T00:18:36,419 DEBUG [5ed4808ef0e6:33443 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:18:36,419 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:36,421 
INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,46261,1732148313968, state=OPENING 2024-11-21T00:18:36,431 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:18:36,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:36,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:36,442 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:36,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,46261,1732148313968}] 2024-11-21T00:18:36,443 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1865054975/meta-region-server: CHANGED 2024-11-21T00:18:36,443 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1865054975/meta-region-server: CHANGED 2024-11-21T00:18:36,605 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:18:36,613 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59137, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:18:36,624 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:18:36,624 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:36,625 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:18:36,627 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C46261%2C1732148313968.meta, suffix=.meta, logDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968, archiveDir=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/oldWALs, maxLogs=10 2024-11-21T00:18:36,644 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968/5ed4808ef0e6%2C46261%2C1732148313968.meta.1732148316627.meta, exclude list is [], retry=0 2024-11-21T00:18:36,649 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34603,DS-9e34bbd6-8e12-485f-8338-796225595356,DISK] 2024-11-21T00:18:36,659 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968/5ed4808ef0e6%2C46261%2C1732148313968.meta.1732148316627.meta 2024-11-21T00:18:36,659 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43945:43945)] 2024-11-21T00:18:36,659 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:36,660 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:36,660 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:36,660 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:18:36,660 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
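The WAL lines above show the region server instantiating an AsyncFSWALProvider for the meta WAL (blocksize, rollsize and maxLogs are printed in the "WAL configuration" entry). A minimal sketch, assuming a standalone configuration snippet rather than code from this test, of how that provider choice is normally expressed; the property names are the stock HBase keys, and the tiny 20 KB test block size comes from test-only overrides that are not reproduced here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfigSketch {
      public static Configuration walTuning() {
        Configuration conf = HBaseConfiguration.create();
        // Select the asynchronous fan-out WAL implementation (AsyncFSWALProvider), as logged above.
        conf.set("hbase.wal.provider", "asyncfs");
        // Keep at most 10 WAL files before forcing flushes, matching maxLogs=10 in the log.
        conf.setInt("hbase.regionserver.maxlogs", 10);
        return conf;
      }
    }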
2024-11-21T00:18:36,660 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:18:36,660 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:36,661 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:18:36,661 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:18:36,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:36,665 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:36,665 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:36,666 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:36,667 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:36,667 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:36,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:36,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:36,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:36,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:36,671 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:36,671 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,672 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
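The StoreOpener entries above mirror the meta column-family settings recorded in the open journal: ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory caching, three versions and an 8 KB block size. A hedged sketch of how an equivalent family could be declared through the public client API; the family shown here is illustrative, since hbase:meta's descriptor is built internally by the master, not by client code.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
      public static ColumnFamilyDescriptor infoLikeFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .build();
      }
    }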
2024-11-21T00:18:36,672 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:36,673 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740 2024-11-21T00:18:36,675 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740 2024-11-21T00:18:36,676 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:36,676 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:36,677 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:36,679 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:36,680 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67577593, jitterRate=0.006984606385231018}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:36,680 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:18:36,681 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148316661Writing region info on filesystem at 1732148316661Initializing all the Stores at 1732148316662 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148316663 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148316663Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148316663Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148316663Cleaning up temporary data from old regions at 1732148316676 (+13 ms)Running coprocessor post-open hooks at 1732148316680 (+4 ms)Region opened successfully at 1732148316680 2024-11-21T00:18:36,682 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148316604 2024-11-21T00:18:36,685 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:18:36,685 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:18:36,686 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:36,688 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,46261,1732148313968, state=OPEN 2024-11-21T00:18:36,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1865054975/meta-region-server 2024-11-21T00:18:36,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1865054975/meta-region-server 2024-11-21T00:18:36,695 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1865054975/meta-region-server: CHANGED 2024-11-21T00:18:36,695 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1865054975/meta-region-server: CHANGED 2024-11-21T00:18:36,695 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:36,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:18:36,699 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,46261,1732148313968 in 252 msec 2024-11-21T00:18:36,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:18:36,705 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 435 msec 2024-11-21T00:18:36,706 DEBUG 
[PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:36,706 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:18:36,708 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:36,708 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,46261,1732148313968, seqNum=-1] 2024-11-21T00:18:36,708 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:36,710 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35155, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:36,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0630 sec 2024-11-21T00:18:36,718 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148316718, completionTime=-1 2024-11-21T00:18:36,718 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:18:36,718 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:18:36,721 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:18:36,721 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148376721 2024-11-21T00:18:36,721 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148436721 2024-11-21T00:18:36,721 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:18:36,721 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33443,1732148313822-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:36,722 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33443,1732148313822-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:36,722 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33443,1732148313822-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:36,722 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:33443, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T00:18:36,722 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:36,722 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:36,724 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.695sec 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33443,1732148313822-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:36,727 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33443,1732148313822-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:18:36,730 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:18:36,730 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:18:36,730 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33443,1732148313822-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
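The ChoreService entries above register the master's periodic tasks (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore and so on) as ScheduledChore instances. A minimal sketch of that pattern under assumed values; the chore name, period and Stoppable are placeholders, not taken from this test.

    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ExampleChore extends ScheduledChore {
      public ExampleChore(Stoppable stopper) {
        // name, stopper, period in milliseconds (hypothetical values)
        super("ExampleChore", stopper, 60_000);
      }

      @Override
      protected void chore() {
        // Periodic work goes here; the chores logged above follow this same shape.
      }
    }

A ChoreService.scheduleChore(new ExampleChore(stopper)) call, like the registrations logged above, would then run the chore once per period until the stopper is triggered.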
2024-11-21T00:18:36,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39adab36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,826 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,33443,-1 for getting cluster id 2024-11-21T00:18:36,826 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:36,827 DEBUG [HMaster-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '119ad92a-ca68-447b-84c6-511bf72b6cc6' 2024-11-21T00:18:36,828 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:36,828 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "119ad92a-ca68-447b-84c6-511bf72b6cc6" 2024-11-21T00:18:36,828 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77316a0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,828 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,33443,-1] 2024-11-21T00:18:36,829 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:36,829 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:36,830 INFO [HMaster-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36184, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:36,831 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e059c40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,831 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:36,833 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,46261,1732148313968, seqNum=-1] 2024-11-21T00:18:36,833 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:36,834 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42134, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:36,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:36,837 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:63439 
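At this point the minicluster is up and the test-side client resolves the cluster id and the meta region location through the connection registry, as the ClusterIdFetcher and ConnectionUtils entries show. A hedged sketch of the corresponding test setup using HBaseTestingUtil (the HBase 3.x name of the testing utility); method names may differ slightly between versions, so treat this as an outline rather than the exact code behind this log.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster(); // starts ZooKeeper, HDFS, master and region server
        try (Connection conn = util.getConnection();
             Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          // The connection goes through the connection registry, as in the log above.
          System.out.println("connected, meta table = " + meta.getName());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }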
2024-11-21T00:18:36,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:36,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015ac198060005 connected 2024-11-21T00:18:36,916 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6676c0dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,916 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,39345,-1 for getting cluster id 2024-11-21T00:18:36,917 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:36,921 DEBUG [HMaster-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '86350b17-0c49-45fe-8c98-dea9d4adeacb' 2024-11-21T00:18:36,924 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:36,924 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "86350b17-0c49-45fe-8c98-dea9d4adeacb" 2024-11-21T00:18:36,925 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff0dce6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,925 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,39345,-1] 2024-11-21T00:18:36,925 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:36,925 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:36,926 INFO [HMaster-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46976, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:36,927 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@733a20a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,928 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:36,930 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:36,930 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7c3e7b05 2024-11-21T00:18:36,930 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:36,932 INFO 
[HMaster-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46990, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:36,932 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:33443,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:18:36,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:18:36,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:36,947 DEBUG [PEWorker-3 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:33443' 2024-11-21T00:18:36,948 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@405cb99a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,948 DEBUG [PEWorker-3 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,33443,-1 for getting cluster id 2024-11-21T00:18:36,949 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:36,950 DEBUG [HMaster-EventLoopGroup-11-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '119ad92a-ca68-447b-84c6-511bf72b6cc6' 2024-11-21T00:18:36,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:36,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "119ad92a-ca68-447b-84c6-511bf72b6cc6" 2024-11-21T00:18:36,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@620f52bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,33443,-1] 2024-11-21T00:18:36,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:36,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:36,952 INFO [HMaster-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36204, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:36,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60655070, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:36,954 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:36,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:36,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@60c3dedd 2024-11-21T00:18:36,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:36,957 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:36,958 INFO [HMaster-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36214, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:36,959 INFO [PEWorker-3 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-3. 2024-11-21T00:18:36,959 DEBUG [PEWorker-3 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:18:36,960 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:36,960 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:36,960 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:36,960 INFO [PEWorker-3 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:36,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:36,969 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1139): Stored pid=5, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:18:36,971 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:18:36,971 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:36,973 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:18:36,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741835_1011 (size=1138) 2024-11-21T00:18:37,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:37,019 DEBUG [PEWorker-3 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:18:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:37,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:37,397 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ebc65f790c7dd1a0db34ca1aa77c5739, NAME => 'hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182 2024-11-21T00:18:37,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741836_1012 (size=44) 2024-11-21T00:18:37,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:37,804 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:37,804 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing ebc65f790c7dd1a0db34ca1aa77c5739, disabling compactions & flushes 2024-11-21T00:18:37,804 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:37,804 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 
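The CreateTableProcedure above builds hbase:replication with the hfileref, queue and sid families and attaches the MultiRowMutationEndpoint coprocessor. The system table itself is created internally by the master as part of peer storage, but a comparable descriptor can be expressed through the client API; the table name below is illustrative only.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class ReplicationLikeTableSketch {
      public static void create(Connection conn) throws Exception {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("replication_like_demo"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("hfileref"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("queue"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("sid"))
            .build();
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(td); // drives a CreateTableProcedure on the master, as in the log above
        }
      }
    }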
2024-11-21T00:18:37,804 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. after waiting 0 ms 2024-11-21T00:18:37,804 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:37,804 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:37,804 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for ebc65f790c7dd1a0db34ca1aa77c5739: Waiting for close lock at 1732148317804Disabling compacts and flushes for region at 1732148317804Disabling writes for close at 1732148317804Writing region close event to WAL at 1732148317804Closed at 1732148317804 2024-11-21T00:18:37,806 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:18:37,806 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148317806"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148317806"}]},"ts":"1732148317806"} 2024-11-21T00:18:37,808 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:18:37,809 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:18:37,809 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148317809"}]},"ts":"1732148317809"} 2024-11-21T00:18:37,811 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:18:37,811 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=ebc65f790c7dd1a0db34ca1aa77c5739, ASSIGN}] 2024-11-21T00:18:37,813 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=ebc65f790c7dd1a0db34ca1aa77c5739, ASSIGN 2024-11-21T00:18:37,814 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=ebc65f790c7dd1a0db34ca1aa77c5739, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,33987,1732148307875; forceNewPlan=false, retain=false 2024-11-21T00:18:37,964 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=6 updating hbase:meta row=ebc65f790c7dd1a0db34ca1aa77c5739, regionState=OPENING, regionLocation=5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:37,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=ebc65f790c7dd1a0db34ca1aa77c5739, ASSIGN because future has completed 2024-11-21T00:18:37,967 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=7, ppid=6, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebc65f790c7dd1a0db34ca1aa77c5739, server=5ed4808ef0e6,33987,1732148307875}] 2024-11-21T00:18:38,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:38,124 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:38,124 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:38,124 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:18:38,126 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33987%2C1732148307875.rep, suffix=, logDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875, archiveDir=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/oldWALs, maxLogs=10 2024-11-21T00:18:38,138 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.rep.1732148318126, exclude list is [], retry=0 2024-11-21T00:18:38,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42325,DS-134b1c38-75d0-4fc3-a900-cf185ec4c3a6,DISK] 2024-11-21T00:18:38,142 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.rep.1732148318126 2024-11-21T00:18:38,143 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46535:46535)] 2024-11-21T00:18:38,143 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7752): Opening region: {ENCODED => ebc65f790c7dd1a0db34ca1aa77c5739, NAME => 'hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:38,143 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:38,143 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:38,143 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. service=MultiRowMutationService 2024-11-21T00:18:38,143 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:18:38,144 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,144 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:38,144 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7794): checking encryption for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,144 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7797): checking classloading for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,145 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,147 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ebc65f790c7dd1a0db34ca1aa77c5739 columnFamilyName hfileref 2024-11-21T00:18:38,147 DEBUG [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:38,147 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] regionserver.HStore(327): Store=ebc65f790c7dd1a0db34ca1aa77c5739/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:38,147 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,149 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ebc65f790c7dd1a0db34ca1aa77c5739 columnFamilyName queue 2024-11-21T00:18:38,149 DEBUG [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:38,149 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] regionserver.HStore(327): Store=ebc65f790c7dd1a0db34ca1aa77c5739/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:38,149 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,151 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ebc65f790c7dd1a0db34ca1aa77c5739 columnFamilyName sid 2024-11-21T00:18:38,151 DEBUG [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:38,151 INFO [StoreOpener-ebc65f790c7dd1a0db34ca1aa77c5739-1 
{}] regionserver.HStore(327): Store=ebc65f790c7dd1a0db34ca1aa77c5739/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:38,151 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1038): replaying wal for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,152 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,153 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,154 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1048): stopping wal replay for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,154 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1060): Cleaning up temporary data for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,154 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
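The CompactionConfiguration lines above report the effective compaction knobs for these stores (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0). As a minimal sketch only, assuming the standard hbase.hstore.compaction.* configuration keys (the exact key set can vary by HBase version), the same values could be set explicitly in a site or test configuration like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Files per minor compaction selection (minFilesToCompact:3 / maxFilesToCompact:10 in the log).
            conf.setInt("hbase.hstore.compaction.min", 3);
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Selection ratios (ratio 1.2 / off-peak ratio 5.0 in the log).
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
            // Lower size bound (minCompactSize:128 MB in the log); max size is left at its effectively unbounded default.
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
            return conf;
        }
    }
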
2024-11-21T00:18:38,155 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1093): writing seq id for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,157 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:38,158 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1114): Opened ebc65f790c7dd1a0db34ca1aa77c5739; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60518406, jitterRate=-0.09820547699928284}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:18:38,158 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:38,158 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1006): Region open journal for ebc65f790c7dd1a0db34ca1aa77c5739: Running coprocessor pre-open hook at 1732148318144Writing region info on filesystem at 1732148318144Initializing all the Stores at 1732148318145 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148318145Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148318145Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148318145Cleaning up temporary data from old regions at 1732148318154 (+9 ms)Running coprocessor post-open hooks at 1732148318158 (+4 ms)Region opened successfully at 1732148318158 2024-11-21T00:18:38,159 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739., pid=7, masterSystemTime=1732148318120 2024-11-21T00:18:38,161 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 
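The region open journal above prints the three column families of hbase:replication (hfileref, queue, sid), each with VERSIONS => '1', BLOOMFILTER => 'ROW' and BLOCKSIZE => '65536 B (64KB)'. This system table is created internally by the master, but purely as an illustration, a descriptor carrying a subset of those attributes could be assembled with the public client API roughly as follows (a sketch, not the actual system-table setup code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationTableDescriptorSketch {
        private static ColumnFamilyDescriptor family(String name) {
            // Mirrors the attributes printed in the open journal: single version, ROW bloom filter, 64 KB blocks.
            return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build();
        }

        public static TableDescriptor descriptor() {
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "replication"))
                .setColumnFamily(family("hfileref"))
                .setColumnFamily(family("queue"))
                .setColumnFamily(family("sid"))
                .build();
        }
    }
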
2024-11-21T00:18:38,161 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:38,162 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=6 updating hbase:meta row=ebc65f790c7dd1a0db34ca1aa77c5739, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:38,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, ppid=6, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebc65f790c7dd1a0db34ca1aa77c5739, server=5ed4808ef0e6,33987,1732148307875 because future has completed 2024-11-21T00:18:38,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=7, resume processing ppid=6 2024-11-21T00:18:38,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, ppid=6, state=SUCCESS, hasLock=false; OpenRegionProcedure ebc65f790c7dd1a0db34ca1aa77c5739, server=5ed4808ef0e6,33987,1732148307875 in 198 msec 2024-11-21T00:18:38,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:18:38,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=ebc65f790c7dd1a0db34ca1aa77c5739, ASSIGN in 356 msec 2024-11-21T00:18:38,171 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:18:38,171 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148318171"}]},"ts":"1732148318171"} 2024-11-21T00:18:38,173 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:18:38,175 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:18:38,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 1.2150 sec 2024-11-21T00:18:38,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739., hostname=5ed4808ef0e6,33987,1732148307875, seqNum=2] 2024-11-21T00:18:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:38,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:38,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, 
ppid=4, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:18:38,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33987 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=8 2024-11-21T00:18:38,471 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:18:38,502 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,33987,1732148307875, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:18:38,503 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:38,503 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33987,1732148307875, seqNum=-1] 2024-11-21T00:18:38,504 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:38,505 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45513, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=ClientService 2024-11-21T00:18:38,508 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,33987,1732148307875', locateType=CURRENT is [region=hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739., hostname=5ed4808ef0e6,33987,1732148307875, seqNum=2] 2024-11-21T00:18:38,511 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-21T00:18:38,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-21T00:18:38,515 INFO [PEWorker-1 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,33987,1732148307875 suceeded 2024-11-21T00:18:38,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=4 2024-11-21T00:18:38,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 198 msec 2024-11-21T00:18:38,522 INFO [PEWorker-2 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:33443,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:18:38,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.5900 sec 2024-11-21T00:18:38,531 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with 
rpc bootstrap servers='5ed4808ef0e6:33443' 2024-11-21T00:18:38,533 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@7d517026, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:38,533 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,33443,-1 for getting cluster id 2024-11-21T00:18:38,533 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:38,534 DEBUG [HMaster-EventLoopGroup-11-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '119ad92a-ca68-447b-84c6-511bf72b6cc6' 2024-11-21T00:18:38,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:38,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "119ad92a-ca68-447b-84c6-511bf72b6cc6" 2024-11-21T00:18:38,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@467615c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:38,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,33443,-1] 2024-11-21T00:18:38,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:38,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:38,536 INFO [HMaster-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36232, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:38,537 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@4a549092, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:38,537 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:38,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:38,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@68e8adfe 2024-11-21T00:18:38,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-12-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:38,539 INFO [HMaster-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36246, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=MasterService 2024-11-21T00:18:38,539 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,33987,1732148307875 (queues=1) is replicating from cluster=86350b17-0c49-45fe-8c98-dea9d4adeacb to cluster=119ad92a-ca68-447b-84c6-511bf72b6cc6 2024-11-21T00:18:38,539 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C33987%2C1732148307875 2024-11-21T00:18:38,539 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,33987,1732148307875, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:18:38,540 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.shipper5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C33987%2C1732148307875 2024-11-21T00:18:38,540 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440, startPosition=0, beingWritten=true 2024-11-21T00:18:38,746 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 2024-11-21T00:18:39,052 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 2024-11-21T00:18:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:39,098 INFO [RPCClient-NioEventLoopGroup-4-12 {}] 
client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:18:39,098 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:18:39,098 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer(TestMasterReplication.java:560) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:39,098 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:39,098 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:39,098 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
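The span above records AddPeerProcedure completing: peer 1 is added ENABLED with clusterKey=hbase+rpc://5ed4808ef0e6:33443 and replicateAllUserTables=true, and the client logs "Operation: ADD_REPLICATION_PEER, peerId: 1 completed". A minimal sketch of issuing the same operation through the public Admin API (connection setup is a placeholder; the URI-style cluster key is copied from the log and is specific to this 3.0.0-beta-2 build):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class AddPeerSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Peer configuration matching the values logged for peer 1.
                ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
                    .setClusterKey("hbase+rpc://5ed4808ef0e6:33443")
                    .setReplicateAllUserTables(true)
                    .build();
                admin.addReplicationPeer("1", peerConfig);
                // The "get replication peer config, id=1" request logged shortly after corresponds to:
                System.out.println(admin.getReplicationPeerConfig("1"));
            }
        }
    }
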
2024-11-21T00:18:39,098 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:39,099 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:39,099 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4b22fe24 2024-11-21T00:18:39,100 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:39,101 INFO [HMaster-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:39,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:18:39,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:18:39,102 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:18:39,102 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer(TestMasterReplication.java:566) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:39,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:39,102 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:39,102 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:39,102 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:18:39,102 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1330017833, stopped=false 2024-11-21T00:18:39,102 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,33443,1732148313822 2024-11-21T00:18:39,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1865054975/running 2024-11-21T00:18:39,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1865054975/running 2024-11-21T00:18:39,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:39,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:39,116 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:39,116 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
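The call stack above shows the test tearing the cluster down via HBaseTestingUtil.shutdownMiniCluster, reached from TestMasterReplication.shutDownMiniClusters. A bare-bones sketch of that lifecycle in a standalone JUnit 4 test, assuming the same HBaseTestingUtil class present in this 3.0.0-beta-2 build (in 2.x the equivalent class is HBaseTestingUtility):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil util = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            // Spins up an in-process master, region server, ZooKeeper and HDFS, as seen earlier in this log.
            util.startMiniCluster();
        }

        @Test
        public void doesNothing() {
            // Test body elided; the point is the setup/teardown pair around it.
        }

        @After
        public void tearDown() throws Exception {
            // Produces the "Shutting down minicluster" / JVMClusterUtil sequence recorded above.
            util.shutdownMiniCluster();
        }
    }
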
2024-11-21T00:18:39,116 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer(TestMasterReplication.java:566) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:39,116 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:39,116 INFO [Time-limited test 
{}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,46261,1732148313968' ***** 2024-11-21T00:18:39,116 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:18:39,116 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:18:39,116 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:18:39,116 INFO [RS:0;5ed4808ef0e6:46261 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:18:39,116 INFO [RS:0;5ed4808ef0e6:46261 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:18:39,116 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:39,117 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:39,117 INFO [RS:0;5ed4808ef0e6:46261 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:46261. 2024-11-21T00:18:39,117 DEBUG [RS:0;5ed4808ef0e6:46261 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:39,117 DEBUG [RS:0;5ed4808ef0e6:46261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:39,117 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:18:39,117 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:18:39,117 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:18:39,117 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:18:39,117 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:18:39,117 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:18:39,117 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:39,117 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:39,117 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:39,117 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on znode that does not yet exist, /1-1865054975/running 2024-11-21T00:18:39,117 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Set watcher on znode that does not yet exist, /1-1865054975/running 2024-11-21T00:18:39,117 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:39,117 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:39,118 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:39,118 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-21T00:18:39,133 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740/.tmp/ns/338242fe93674325ab1d2cf175124c0e is 43, key is default/ns:d/1732148316710/Put/seqid=0 2024-11-21T00:18:39,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741835_1011 (size=5153) 2024-11-21T00:18:39,137 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:39,317 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:39,457 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 2024-11-21T00:18:39,518 DEBUG [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:39,538 INFO 
[RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740/.tmp/ns/338242fe93674325ab1d2cf175124c0e 2024-11-21T00:18:39,550 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740/.tmp/ns/338242fe93674325ab1d2cf175124c0e as hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740/ns/338242fe93674325ab1d2cf175124c0e 2024-11-21T00:18:39,564 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740/ns/338242fe93674325ab1d2cf175124c0e, entries=2, sequenceid=6, filesize=5.0 K 2024-11-21T00:18:39,565 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 447ms, sequenceid=6, compaction requested=false 2024-11-21T00:18:39,570 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T00:18:39,571 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:39,571 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:39,571 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:39,571 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148319117Running coprocessor pre-close hooks at 1732148319117Disabling compacts and flushes for region at 1732148319117Disabling writes for close at 1732148319118 (+1 ms)Obtaining lock to block concurrent updates at 1732148319118Preparing flush snapshotting stores in 1588230740 at 1732148319118Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732148319118Flushing stores of hbase:meta,,1.1588230740 at 1732148319119 (+1 ms)Flushing 1588230740/ns: creating writer at 1732148319119Flushing 1588230740/ns: appending metadata at 1732148319132 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732148319132Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4909cc96: reopening flushed file at 1732148319549 (+417 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 447ms, sequenceid=6, compaction requested=false at 1732148319565 (+16 ms)Writing region close 
event to WAL at 1732148319566 (+1 ms)Running coprocessor post-close hooks at 1732148319571 (+5 ms)Closed at 1732148319571 2024-11-21T00:18:39,571 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:39,718 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,46261,1732148313968; all regions closed. 2024-11-21T00:18:39,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741834_1010 (size=1152) 2024-11-21T00:18:39,721 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/WALs/5ed4808ef0e6,46261,1732148313968/5ed4808ef0e6%2C46261%2C1732148313968.meta.1732148316627.meta not finished, retry = 0 2024-11-21T00:18:39,823 DEBUG [RS:0;5ed4808ef0e6:46261 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/oldWALs 2024-11-21T00:18:39,823 INFO [RS:0;5ed4808ef0e6:46261 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C46261%2C1732148313968.meta:.meta(num 1732148316627) 2024-11-21T00:18:39,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741833_1009 (size=93) 2024-11-21T00:18:39,827 DEBUG [RS:0;5ed4808ef0e6:46261 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/oldWALs 2024-11-21T00:18:39,827 INFO [RS:0;5ed4808ef0e6:46261 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C46261%2C1732148313968:(num 1732148316078) 2024-11-21T00:18:39,827 DEBUG [RS:0;5ed4808ef0e6:46261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:39,827 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:39,828 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:39,828 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:39,828 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:39,828 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
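The flush recorded above follows the standard path: the memstore is written to a temporary hfile under .tmp, committed into the store directory (entries=2, sequenceid=6, filesize=5.0 K), and the region is then closed. For a user table the same flush can be requested from a client; a small sketch assuming a hypothetical table named "t1" (illustrative only, not taken from this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Forces the memstores of every region of the table to be written out as hfiles,
                // the same mechanism DefaultStoreFlusher is logging above for hbase:meta.
                admin.flush(TableName.valueOf("t1"));
            }
        }
    }
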
2024-11-21T00:18:39,828 INFO [RS:0;5ed4808ef0e6:46261 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46261 2024-11-21T00:18:39,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1865054975/rs/5ed4808ef0e6,46261,1732148313968 2024-11-21T00:18:39,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975/rs 2024-11-21T00:18:39,841 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:39,852 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,46261,1732148313968] 2024-11-21T00:18:39,862 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-1865054975/draining/5ed4808ef0e6,46261,1732148313968 already deleted, retry=false 2024-11-21T00:18:39,863 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,46261,1732148313968 expired; onlineServers=0 2024-11-21T00:18:39,863 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,33443,1732148313822' ***** 2024-11-21T00:18:39,863 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:18:39,863 INFO [M:0;5ed4808ef0e6:33443 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:39,863 INFO [M:0;5ed4808ef0e6:33443 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:39,863 DEBUG [M:0;5ed4808ef0e6:33443 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:18:39,863 DEBUG [M:0;5ed4808ef0e6:33443 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:18:39,863 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:18:39,863 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148315737 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148315737,5,FailOnTimeoutGroup] 2024-11-21T00:18:39,863 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148315748 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148315748,5,FailOnTimeoutGroup] 2024-11-21T00:18:39,863 INFO [M:0;5ed4808ef0e6:33443 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:39,863 INFO [M:0;5ed4808ef0e6:33443 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:39,863 DEBUG [M:0;5ed4808ef0e6:33443 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:18:39,863 INFO [M:0;5ed4808ef0e6:33443 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:18:39,864 INFO [M:0;5ed4808ef0e6:33443 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:39,864 INFO [M:0;5ed4808ef0e6:33443 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:18:39,864 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:18:39,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1865054975/master 2024-11-21T00:18:39,873 DEBUG [M:0;5ed4808ef0e6:33443 {}] zookeeper.ZKUtil(347): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Unable to get data of znode /1-1865054975/master because node does not exist (not an error) 2024-11-21T00:18:39,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1865054975 2024-11-21T00:18:39,873 WARN [M:0;5ed4808ef0e6:33443 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:18:39,874 INFO [M:0;5ed4808ef0e6:33443 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/.lastflushedseqids 2024-11-21T00:18:39,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741836_1012 (size=99) 2024-11-21T00:18:39,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:39,952 INFO [RS:0;5ed4808ef0e6:46261 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:39,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46261-0x1015ac198060004, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:39,952 INFO [RS:0;5ed4808ef0e6:46261 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,46261,1732148313968; 
zookeeper connection closed. 2024-11-21T00:18:39,953 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6d59fd5f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6d59fd5f 2024-11-21T00:18:39,953 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:18:39,963 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 2024-11-21T00:18:40,279 INFO [M:0;5ed4808ef0e6:33443 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:18:40,279 INFO [M:0;5ed4808ef0e6:33443 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:18:40,279 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:40,279 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:40,279 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:40,279 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:40,279 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:18:40,279 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.62 KB heapSize=11.22 KB 2024-11-21T00:18:40,296 DEBUG [M:0;5ed4808ef0e6:33443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b7c31fd6fe314e7eb5b626ada11db4fc is 82, key is hbase:meta,,1/info:regioninfo/1732148316686/Put/seqid=0 2024-11-21T00:18:40,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741837_1013 (size=5672) 2024-11-21T00:18:40,568 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 2024-11-21T00:18:40,701 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b7c31fd6fe314e7eb5b626ada11db4fc 2024-11-21T00:18:40,727 DEBUG [M:0;5ed4808ef0e6:33443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2efb4ab4ce1346e7b242b41a71dff892 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732148316717/Put/seqid=0 2024-11-21T00:18:40,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741838_1014 (size=5275) 2024-11-21T00:18:41,133 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2efb4ab4ce1346e7b242b41a71dff892 2024-11-21T00:18:41,156 DEBUG [M:0;5ed4808ef0e6:33443 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1576fec456114328aef94f9384a8a4b3 is 69, key is 5ed4808ef0e6,46261,1732148313968/rs:state/1732148315794/Put/seqid=0 2024-11-21T00:18:41,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741839_1015 (size=5156) 2024-11-21T00:18:41,273 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 
2024-11-21T00:18:41,561 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1576fec456114328aef94f9384a8a4b3 2024-11-21T00:18:41,569 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b7c31fd6fe314e7eb5b626ada11db4fc as hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b7c31fd6fe314e7eb5b626ada11db4fc 2024-11-21T00:18:41,575 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b7c31fd6fe314e7eb5b626ada11db4fc, entries=8, sequenceid=28, filesize=5.5 K 2024-11-21T00:18:41,576 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2efb4ab4ce1346e7b242b41a71dff892 as hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2efb4ab4ce1346e7b242b41a71dff892 2024-11-21T00:18:41,582 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2efb4ab4ce1346e7b242b41a71dff892, entries=3, sequenceid=28, filesize=5.2 K 2024-11-21T00:18:41,583 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1576fec456114328aef94f9384a8a4b3 as hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1576fec456114328aef94f9384a8a4b3 2024-11-21T00:18:41,589 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40639/user/jenkins/test-data/8ceaa33c-f54b-a83c-de77-40fa278f4bd9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1576fec456114328aef94f9384a8a4b3, entries=1, sequenceid=28, filesize=5.0 K 2024-11-21T00:18:41,591 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1312ms, sequenceid=28, compaction requested=false 2024-11-21T00:18:41,592 INFO [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:18:41,592 DEBUG [M:0;5ed4808ef0e6:33443 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148320279Disabling compacts and flushes for region at 1732148320279Disabling writes for close at 1732148320279Obtaining lock to block concurrent updates at 1732148320279Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148320279Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7802, getHeapSize=11424, getOffHeapSize=0, getCellsCount=35 at 1732148320280 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148320281 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148320281Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148320296 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148320296Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148320710 (+414 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148320727 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148320727Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148321139 (+412 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148321156 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148321156Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6adb1349: reopening flushed file at 1732148321568 (+412 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ec51021: reopening flushed file at 1732148321575 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@676ce66e: reopening flushed file at 1732148321582 (+7 ms)Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1312ms, sequenceid=28, compaction requested=false at 1732148321591 (+9 ms)Writing region close event to WAL at 1732148321592 (+1 ms)Closed at 1732148321592 2024-11-21T00:18:41,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34603 is added to blk_1073741830_1006 (size=10165) 2024-11-21T00:18:41,595 INFO [M:0;5ed4808ef0e6:33443 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:18:41,595 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:18:41,595 INFO [M:0;5ed4808ef0e6:33443 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33443 2024-11-21T00:18:41,596 INFO [M:0;5ed4808ef0e6:33443 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:41,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:41,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33443-0x1015ac198060003, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:41,711 INFO [M:0;5ed4808ef0e6:33443 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:41,751 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11a14b29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:41,752 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7f7bfbaf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:41,752 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:41,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15c2233d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:41,752 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cbdedc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:41,755 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:18:41,755 WARN [BP-1743846537-172.17.0.2-1732148311681 heartbeating to localhost/127.0.0.1:40639 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:18:41,755 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:18:41,755 WARN [BP-1743846537-172.17.0.2-1732148311681 heartbeating to localhost/127.0.0.1:40639 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1743846537-172.17.0.2-1732148311681 (Datanode Uuid a1c2f001-7a46-46bb-8143-15713f2f69a9) service to localhost/127.0.0.1:40639 2024-11-21T00:18:41,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/cluster_94bdb889-0d52-f7af-62c1-ee7d393540cc/data/data1/current/BP-1743846537-172.17.0.2-1732148311681 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:41,756 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/cluster_94bdb889-0d52-f7af-62c1-ee7d393540cc/data/data2/current/BP-1743846537-172.17.0.2-1732148311681 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:41,756 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:18:41,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@195f4a04{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:41,763 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@54197af4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:41,763 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:41,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3833aa3c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:41,763 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@201103c7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:41,778 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:18:41,779 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:18:41,779 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:18:41,779 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer(TestMasterReplication.java:566) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:41,779 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:41,779 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:41,779 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:18:41,779 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:18:41,779 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1177918031, stopped=false 2024-11-21T00:18:41,780 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,39345,1732148307659 2024-11-21T00:18:41,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-1278390213/running 2024-11-21T00:18:41,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-1278390213/running 2024-11-21T00:18:41,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:41,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:41,799 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:41,800 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:18:41,800 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer(TestMasterReplication.java:566) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:41,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:41,800 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on znode that does not yet exist, /0-1278390213/running 2024-11-21T00:18:41,800 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,33987,1732148307875' ***** 2024-11-21T00:18:41,800 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:18:41,800 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Set watcher on znode that does not yet exist, /0-1278390213/running 2024-11-21T00:18:41,800 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:18:41,801 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(3091): Received CLOSE for ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33987. 
2024-11-21T00:18:41,801 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ebc65f790c7dd1a0db34ca1aa77c5739, disabling compactions & flushes 2024-11-21T00:18:41,801 DEBUG [RS:0;5ed4808ef0e6:33987 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:41,801 DEBUG [RS:0;5ed4808ef0e6:33987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:41,801 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:41,801 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:18:41,801 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. after waiting 0 ms 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:18:41,801 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:18:41,801 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ebc65f790c7dd1a0db34ca1aa77c5739 3/3 column families, dataSize=147 B heapSize=992 B 2024-11-21T00:18:41,801 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:18:41,802 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:18:41,802 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1325): Online Regions={ebc65f790c7dd1a0db34ca1aa77c5739=hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:18:41,802 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:41,802 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:41,802 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:41,802 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:41,802 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:41,802 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:41,802 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-21T00:18:41,819 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739/.tmp/queue/92a197dfec8841ebbccf49acd2049792 is 151, key is 1-5ed4808ef0e6,33987,1732148307875/queue:5ed4808ef0e6%2C33987%2C1732148307875/1732148318509/Put/seqid=0 2024-11-21T00:18:41,820 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/info/c88ba522874e42d8bc45504dde17b47b is 147, key is hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739./info:regioninfo/1732148318162/Put/seqid=0 2024-11-21T00:18:41,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741839_1015 (size=6631) 2024-11-21T00:18:41,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741838_1014 (size=5350) 2024-11-21T00:18:41,826 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.17 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/info/c88ba522874e42d8bc45504dde17b47b 2024-11-21T00:18:41,829 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:18:41,846 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/ns/b67598b0ab6d4fa3998a616063581b86 is 43, key is default/ns:d/1732148311506/Put/seqid=0 2024-11-21T00:18:41,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741840_1016 (size=5153) 2024-11-21T00:18:42,002 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:42,078 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 2024-11-21T00:18:42,163 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:42,165 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:42,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:42,197 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:42,202 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ebc65f790c7dd1a0db34ca1aa77c5739 2024-11-21T00:18:42,226 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=147 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739/.tmp/queue/92a197dfec8841ebbccf49acd2049792 2024-11-21T00:18:42,233 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739/.tmp/queue/92a197dfec8841ebbccf49acd2049792 as hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739/queue/92a197dfec8841ebbccf49acd2049792 2024-11-21T00:18:42,241 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739/queue/92a197dfec8841ebbccf49acd2049792, entries=1, sequenceid=5, filesize=5.2 K 2024-11-21T00:18:42,243 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~147 B/147, heapSize ~464 B/464, currentSize=0 B/0 for ebc65f790c7dd1a0db34ca1aa77c5739 in 442ms, sequenceid=5, compaction requested=false 2024-11-21T00:18:42,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:18:42,252 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/ns/b67598b0ab6d4fa3998a616063581b86 2024-11-21T00:18:42,254 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/replication/ebc65f790c7dd1a0db34ca1aa77c5739/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T00:18:42,255 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:42,255 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:42,256 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 
2024-11-21T00:18:42,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ebc65f790c7dd1a0db34ca1aa77c5739: Waiting for close lock at 1732148321801Running coprocessor pre-close hooks at 1732148321801Disabling compacts and flushes for region at 1732148321801Disabling writes for close at 1732148321801Obtaining lock to block concurrent updates at 1732148321801Preparing flush snapshotting stores in ebc65f790c7dd1a0db34ca1aa77c5739 at 1732148321801Finished memstore snapshotting hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739., syncing WAL and waiting on mvcc, flushsize=dataSize=147, getHeapSize=944, getOffHeapSize=0, getCellsCount=1 at 1732148321802 (+1 ms)Flushing stores of hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. at 1732148321802Flushing ebc65f790c7dd1a0db34ca1aa77c5739/queue: creating writer at 1732148321802Flushing ebc65f790c7dd1a0db34ca1aa77c5739/queue: appending metadata at 1732148321818 (+16 ms)Flushing ebc65f790c7dd1a0db34ca1aa77c5739/queue: closing flushed file at 1732148321818Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@270c289e: reopening flushed file at 1732148322232 (+414 ms)Finished flush of dataSize ~147 B/147, heapSize ~464 B/464, currentSize=0 B/0 for ebc65f790c7dd1a0db34ca1aa77c5739 in 442ms, sequenceid=5, compaction requested=false at 1732148322243 (+11 ms)Writing region close event to WAL at 1732148322248 (+5 ms)Running coprocessor post-close hooks at 1732148322255 (+7 ms)Closed at 1732148322256 (+1 ms) 2024-11-21T00:18:42,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148316960.ebc65f790c7dd1a0db34ca1aa77c5739. 
2024-11-21T00:18:42,282 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/table/97282e58ede945cf8201e924c4c30cde is 53, key is hbase:replication/table:state/1732148318171/Put/seqid=0 2024-11-21T00:18:42,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741841_1017 (size=5256) 2024-11-21T00:18:42,355 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:18:42,355 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:18:42,402 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:42,603 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:42,701 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=98 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/table/97282e58ede945cf8201e924c4c30cde 2024-11-21T00:18:42,717 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/info/c88ba522874e42d8bc45504dde17b47b as hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/info/c88ba522874e42d8bc45504dde17b47b 2024-11-21T00:18:42,732 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/info/c88ba522874e42d8bc45504dde17b47b, entries=10, sequenceid=11, filesize=6.5 K 2024-11-21T00:18:42,742 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/ns/b67598b0ab6d4fa3998a616063581b86 as hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/ns/b67598b0ab6d4fa3998a616063581b86 2024-11-21T00:18:42,763 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/ns/b67598b0ab6d4fa3998a616063581b86, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:18:42,766 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/.tmp/table/97282e58ede945cf8201e924c4c30cde as hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/table/97282e58ede945cf8201e924c4c30cde 2024-11-21T00:18:42,778 INFO 
[RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/table/97282e58ede945cf8201e924c4c30cde, entries=2, sequenceid=11, filesize=5.1 K 2024-11-21T00:18:42,779 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1368, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 977ms, sequenceid=11, compaction requested=false 2024-11-21T00:18:42,808 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:18:42,808 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:18:42,808 DEBUG [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:18:42,812 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T00:18:42,814 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:42,814 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:42,814 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:42,815 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148321802Running coprocessor pre-close hooks at 1732148321802Disabling compacts and flushes for region at 1732148321802Disabling writes for close at 1732148321802Obtaining lock to block concurrent updates at 1732148321802Preparing flush snapshotting stores in 1588230740 at 1732148321802Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1368, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732148321802Flushing stores of hbase:meta,,1.1588230740 at 1732148321803 (+1 ms)Flushing 1588230740/info: creating writer at 1732148321803Flushing 1588230740/info: appending metadata at 1732148321819 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732148321819Flushing 1588230740/ns: creating writer at 1732148321832 (+13 ms)Flushing 1588230740/ns: appending metadata at 1732148321845 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732148321845Flushing 1588230740/table: creating writer at 1732148322261 (+416 ms)Flushing 1588230740/table: appending metadata at 1732148322282 (+21 ms)Flushing 1588230740/table: closing flushed file at 1732148322282Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f79e7e7: reopening flushed file at 1732148322715 (+433 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65bf0611: reopening flushed file at 1732148322733 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e6a615e: 
reopening flushed file at 1732148322763 (+30 ms)Finished flush of dataSize ~1.34 KB/1368, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 977ms, sequenceid=11, compaction requested=false at 1732148322779 (+16 ms)Writing region close event to WAL at 1732148322800 (+21 ms)Running coprocessor post-close hooks at 1732148322814 (+14 ms)Closed at 1732148322814 2024-11-21T00:18:42,815 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:42,986 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 to pos 0, reset compression=false 2024-11-21T00:18:43,008 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,33987,1732148307875; all regions closed. 2024-11-21T00:18:43,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741834_1010 (size=2742) 2024-11-21T00:18:43,011 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.meta.1732148311435.meta not finished, retry = 0 2024-11-21T00:18:43,119 DEBUG [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/oldWALs 2024-11-21T00:18:43,119 INFO [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C33987%2C1732148307875.meta:.meta(num 1732148311435) 2024-11-21T00:18:43,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741837_1013 (size=1586) 2024-11-21T00:18:43,132 DEBUG [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/oldWALs 2024-11-21T00:18:43,132 INFO [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C33987%2C1732148307875.rep:(num 1732148318126) 2024-11-21T00:18:43,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741832_1008 (size=93) 2024-11-21T00:18:43,139 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/WALs/5ed4808ef0e6,33987,1732148307875/5ed4808ef0e6%2C33987%2C1732148307875.1732148310440 not finished, retry = 0 2024-11-21T00:18:43,248 DEBUG [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/oldWALs 2024-11-21T00:18:43,248 INFO [RS:0;5ed4808ef0e6:33987 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C33987%2C1732148307875:(num 1732148310440) 2024-11-21T00:18:43,248 DEBUG [RS:0;5ed4808ef0e6:33987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:43,248 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.LeaseManager(133): Closed leases 
2024-11-21T00:18:43,248 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:43,248 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:43,249 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:43,249 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:18:43,249 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,33987,1732148307875 because: Region server is closing 2024-11-21T00:18:43,249 INFO [RS:0;5ed4808ef0e6:33987 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33987. 2024-11-21T00:18:43,249 DEBUG [RS:0;5ed4808ef0e6:33987 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:18:43,249 DEBUG [RS:0;5ed4808ef0e6:33987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:43,249 DEBUG [RS:0;5ed4808ef0e6:33987 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:43,249 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:18:43,350 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.shipper5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 2024-11-21T00:18:43,350 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.wal-reader.5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 
2024-11-21T00:18:43,350 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33987,1732148307875.replicationSource.shipper5ed4808ef0e6%2C33987%2C1732148307875,1-5ed4808ef0e6,33987,1732148307875 terminated 2024-11-21T00:18:43,350 INFO [RS:0;5ed4808ef0e6:33987 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33987 2024-11-21T00:18:43,453 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:43,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-1278390213/rs/5ed4808ef0e6,33987,1732148307875 2024-11-21T00:18:43,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213/rs 2024-11-21T00:18:43,464 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,33987,1732148307875] 2024-11-21T00:18:43,476 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /0-1278390213/draining/5ed4808ef0e6,33987,1732148307875 already deleted, retry=false 2024-11-21T00:18:43,477 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,33987,1732148307875 expired; onlineServers=0 2024-11-21T00:18:43,477 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,39345,1732148307659' ***** 2024-11-21T00:18:43,477 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:18:43,477 INFO [M:0;5ed4808ef0e6:39345 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:18:43,477 INFO [M:0;5ed4808ef0e6:39345 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:18:43,477 DEBUG [M:0;5ed4808ef0e6:39345 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:18:43,477 DEBUG [M:0;5ed4808ef0e6:39345 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:18:43,477 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:18:43,478 INFO [M:0;5ed4808ef0e6:39345 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:18:43,478 INFO [M:0;5ed4808ef0e6:39345 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:18:43,478 DEBUG [M:0;5ed4808ef0e6:39345 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:18:43,478 INFO [M:0;5ed4808ef0e6:39345 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:18:43,478 INFO [M:0;5ed4808ef0e6:39345 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:18:43,478 INFO [M:0;5ed4808ef0e6:39345 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:18:43,479 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148310220 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148310220,5,FailOnTimeoutGroup] 2024-11-21T00:18:43,479 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148310220 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148310220,5,FailOnTimeoutGroup] 2024-11-21T00:18:43,479 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:18:43,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-1278390213/master 2024-11-21T00:18:43,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-1278390213 2024-11-21T00:18:43,508 DEBUG [M:0;5ed4808ef0e6:39345 {}] zookeeper.RecoverableZooKeeper(212): Node /0-1278390213/master already deleted, retry=false 2024-11-21T00:18:43,508 DEBUG [M:0;5ed4808ef0e6:39345 {}] master.ActiveMasterManager(353): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Failed delete of our master address node; KeeperErrorCode = NoNode for /0-1278390213/master 2024-11-21T00:18:43,511 INFO [M:0;5ed4808ef0e6:39345 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/.lastflushedseqids 2024-11-21T00:18:43,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:43,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33987-0x1015ac198060001, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:43,568 INFO [RS:0;5ed4808ef0e6:33987 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:43,568 INFO [RS:0;5ed4808ef0e6:33987 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,33987,1732148307875; zookeeper connection closed. 
2024-11-21T00:18:43,571 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@442ddcca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@442ddcca 2024-11-21T00:18:43,572 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:18:43,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741842_1018 (size=181) 2024-11-21T00:18:43,583 INFO [M:0;5ed4808ef0e6:39345 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:18:43,583 INFO [M:0;5ed4808ef0e6:39345 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:18:43,583 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:43,583 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:43,583 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:43,583 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:43,583 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:43,583 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=35.09 KB heapSize=42.23 KB 2024-11-21T00:18:43,646 DEBUG [M:0;5ed4808ef0e6:39345 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7aa0ebf721c49bdb0606f4d64977e1c is 82, key is hbase:meta,,1/info:regioninfo/1732148311475/Put/seqid=0 2024-11-21T00:18:43,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741843_1019 (size=5672) 2024-11-21T00:18:44,088 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7aa0ebf721c49bdb0606f4d64977e1c 2024-11-21T00:18:44,190 DEBUG [M:0;5ed4808ef0e6:39345 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9f4ff65ac654c5aaf1d1fc0bd1adb16 is 1478, key is \x00\x00\x00\x00\x00\x00\x00\x05/proc:d/1732148318176/Put/seqid=0 2024-11-21T00:18:44,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741844_1020 (size=7203) 2024-11-21T00:18:44,627 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=34.54 KB at sequenceid=70 (bloomFilter=true), 
to=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9f4ff65ac654c5aaf1d1fc0bd1adb16 2024-11-21T00:18:44,650 DEBUG [M:0;5ed4808ef0e6:39345 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f39f48d62dad4fc29afea4f3bb4984e3 is 69, key is 5ed4808ef0e6,33987,1732148307875/rs:state/1732148310248/Put/seqid=0 2024-11-21T00:18:44,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741845_1021 (size=5156) 2024-11-21T00:18:44,655 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f39f48d62dad4fc29afea4f3bb4984e3 2024-11-21T00:18:44,664 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c7aa0ebf721c49bdb0606f4d64977e1c as hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c7aa0ebf721c49bdb0606f4d64977e1c 2024-11-21T00:18:44,671 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c7aa0ebf721c49bdb0606f4d64977e1c, entries=8, sequenceid=70, filesize=5.5 K 2024-11-21T00:18:44,680 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f9f4ff65ac654c5aaf1d1fc0bd1adb16 as hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9f4ff65ac654c5aaf1d1fc0bd1adb16 2024-11-21T00:18:44,696 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f9f4ff65ac654c5aaf1d1fc0bd1adb16, entries=8, sequenceid=70, filesize=7.0 K 2024-11-21T00:18:44,698 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f39f48d62dad4fc29afea4f3bb4984e3 as hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f39f48d62dad4fc29afea4f3bb4984e3 2024-11-21T00:18:44,711 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36001/user/jenkins/test-data/50e024dc-5ac8-ee12-b700-e6fd626db182/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f39f48d62dad4fc29afea4f3bb4984e3, entries=1, sequenceid=70, filesize=5.0 K 2024-11-21T00:18:44,713 
INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(3140): Finished flush of dataSize ~35.09 KB/35933, heapSize ~41.94 KB/42944, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1130ms, sequenceid=70, compaction requested=false 2024-11-21T00:18:44,720 INFO [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:44,720 DEBUG [M:0;5ed4808ef0e6:39345 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148323583Disabling compacts and flushes for region at 1732148323583Disabling writes for close at 1732148323583Obtaining lock to block concurrent updates at 1732148323583Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148323583Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=35933, getHeapSize=43184, getOffHeapSize=0, getCellsCount=83 at 1732148323584 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148323588 (+4 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148323589 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148323645 (+56 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148323645Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148324123 (+478 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148324189 (+66 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148324189Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148324633 (+444 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148324650 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148324650Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fd43cfc: reopening flushed file at 1732148324661 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@88da8c6: reopening flushed file at 1732148324671 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19031817: reopening flushed file at 1732148324696 (+25 ms)Finished flush of dataSize ~35.09 KB/35933, heapSize ~41.94 KB/42944, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1130ms, sequenceid=70, compaction requested=false at 1732148324713 (+17 ms)Writing region close event to WAL at 1732148324720 (+7 ms)Closed at 1732148324720 2024-11-21T00:18:44,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42325 is added to blk_1073741830_1006 (size=41008) 2024-11-21T00:18:44,725 INFO [M:0;5ed4808ef0e6:39345 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:18:44,725 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:18:44,725 INFO [M:0;5ed4808ef0e6:39345 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39345 2024-11-21T00:18:44,726 INFO [M:0;5ed4808ef0e6:39345 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:18:44,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:44,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39345-0x1015ac198060000, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:18:44,911 INFO [M:0;5ed4808ef0e6:39345 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:18:44,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4828004d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:44,936 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d554b36{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:44,936 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:44,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10fb0c59{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:44,937 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1474973e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:44,938 WARN [BP-1011016395-172.17.0.2-1732148305013 heartbeating to localhost/127.0.0.1:36001 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:18:44,938 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:18:44,939 WARN [BP-1011016395-172.17.0.2-1732148305013 heartbeating to localhost/127.0.0.1:36001 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1011016395-172.17.0.2-1732148305013 (Datanode Uuid e821f767-cf8e-40fa-869b-0c8d25e1e9c4) service to localhost/127.0.0.1:36001 2024-11-21T00:18:44,939 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:18:44,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5/data/data1/current/BP-1011016395-172.17.0.2-1732148305013 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:44,939 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/cluster_5188eafb-8588-6de2-f52d-6e913abd18f5/data/data2/current/BP-1011016395-172.17.0.2-1732148305013 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:18:44,940 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:18:44,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7597814{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:44,953 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b56bf5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:18:44,953 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:18:44,955 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aae1200{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:18:44,955 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4343dd3b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b7cb3b37-9364-c685-0d90-63a0e4c5fc9e/hadoop.log.dir/,STOPPED} 2024-11-21T00:18:44,970 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:18:45,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:18:45,008 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testRemoveBasePeerConfigWithoutExistingConfigForReplicationPeer Thread=136 (was 108) Potentially hanging thread: HMaster-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-7-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-6-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:40639 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-7-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-6-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40639 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-7-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:63439) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:40639 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-9-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:40639 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:40639 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36001 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-6-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:36001 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:36001 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:36001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-11-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40639 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:36001 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:63439) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) - Thread LEAK? -, OpenFileDescriptor=464 (was 428) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=715 (was 830), ProcessCount=11 (was 11), AvailableMemoryMB=2076 (was 1763) - AvailableMemoryMB LEAK? - 2024-11-21T00:18:45,016 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testHFileReplicationForConfiguredTableCfs Thread=136, OpenFileDescriptor=464, MaxFileDescriptor=1048576, SystemLoadAverage=715, ProcessCount=11, AvailableMemoryMB=2076 2024-11-21T00:18:45,034 INFO [Time-limited test {}] replication.TestMasterReplication(356): testHFileReplicationForConfiguredTableCfs 2024-11-21T00:18:45,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.log.dir so I do NOT create it in target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81 2024-11-21T00:18:45,035 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.log.dir Erasing configuration value by system value. 
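Annotation: each TestMasterReplication case in this log tears down and restarts a full single-node minicluster (ZooKeeper, HDFS, one master, one region server), which is why ResourceChecker brackets every test with thread and file-descriptor counts. The sketch below shows the general shape of that lifecycle using the HBaseTestingUtil and StartMiniClusterOption names that appear in the entries that follow; the builder methods and exact signatures are assumptions based on the public HBase testing API and may differ on this branch.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterLifecycleSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors the option string logged below: one master, one region server,
        // one data node, one ZK server. Builder method names are assumptions.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);
        try {
            // ... exercise replication peers and tables against util.getConnection() ...
        } finally {
            util.shutdownMiniCluster();  // corresponds to the "Minicluster is down" entry
        }
    }
}
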
2024-11-21T00:18:45,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.tmp.dir so I do NOT create it in target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81 2024-11-21T00:18:45,035 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9f038e24-5ca5-c776-8283-7f880668eced/hadoop.tmp.dir Erasing configuration value by system value. 2024-11-21T00:18:45,036 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81 2024-11-21T00:18:45,036 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8, deleteOnExit=true 2024-11-21T00:18:45,039 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8/zookeeper_0, clientPort=58140, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:18:45,040 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58140 2024-11-21T00:18:45,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:18:45,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:18:45,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/test.cache.data in system properties and HBase conf 2024-11-21T00:18:45,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:18:45,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:18:45,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:18:45,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:18:45,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:18:45,041 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:18:45,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:45,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:18:45,042 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:45,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:18:45,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:18:45,062 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015ac198060005, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:18:45,062 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015ac198060005, quorum=127.0.0.1:63439, baseZNode=/1-1865054975 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:18:45,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015ac198060002, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:18:45,063 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015ac198060002, quorum=127.0.0.1:63439, baseZNode=/0-1278390213 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:18:45,492 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:45,500 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:45,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:45,518 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:45,518 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:18:45,519 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:45,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bcf2990{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:45,520 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69ee97df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:45,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@358a93e0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/java.io.tmpdir/jetty-localhost-46007-hadoop-hdfs-3_4_1-tests_jar-_-any-11785001773989085288/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:45,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@59561a4d{HTTP/1.1, (http/1.1)}{localhost:46007} 2024-11-21T00:18:45,622 INFO [Time-limited test {}] server.Server(415): Started @60171ms 2024-11-21T00:18:46,059 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:46,063 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:46,064 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:46,064 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:46,064 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:18:46,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fc1d38b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:46,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35e7e964{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:46,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9df7998{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/java.io.tmpdir/jetty-localhost-40859-hadoop-hdfs-3_4_1-tests_jar-_-any-8364492792538029220/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:46,165 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ecb522c{HTTP/1.1, (http/1.1)}{localhost:40859} 2024-11-21T00:18:46,166 INFO [Time-limited test {}] server.Server(415): Started @60715ms 2024-11-21T00:18:46,167 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-21T00:18:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:18:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:18:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:47,404 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8/data/data2/current/BP-1033271834-172.17.0.2-1732148325074/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:47,404 WARN [Thread-828 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8/data/data1/current/BP-1033271834-172.17.0.2-1732148325074/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:47,419 WARN [Thread-816 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T00:18:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdcbf4ebb5f33948a with lease ID 0xde4d3697599714f9: Processing first storage report for DS-7801d401-6f3f-4630-a52f-d30dd62f106d from datanode DatanodeRegistration(127.0.0.1:38023, datanodeUuid=9111521d-3041-4a5e-b511-c11db812c5e1, infoPort=45785, infoSecurePort=0, ipcPort=36571, storageInfo=lv=-57;cid=testClusterID;nsid=530403037;c=1732148325074) 2024-11-21T00:18:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcbf4ebb5f33948a with lease ID 0xde4d3697599714f9: from storage DS-7801d401-6f3f-4630-a52f-d30dd62f106d node DatanodeRegistration(127.0.0.1:38023, datanodeUuid=9111521d-3041-4a5e-b511-c11db812c5e1, infoPort=45785, infoSecurePort=0, ipcPort=36571, storageInfo=lv=-57;cid=testClusterID;nsid=530403037;c=1732148325074), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T00:18:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdcbf4ebb5f33948a with lease ID 0xde4d3697599714f9: Processing first storage report for DS-7a5de5af-5bb9-43c3-bbb0-48d0a6991ea6 from datanode DatanodeRegistration(127.0.0.1:38023, datanodeUuid=9111521d-3041-4a5e-b511-c11db812c5e1, infoPort=45785, infoSecurePort=0, ipcPort=36571, storageInfo=lv=-57;cid=testClusterID;nsid=530403037;c=1732148325074) 2024-11-21T00:18:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdcbf4ebb5f33948a with lease ID 0xde4d3697599714f9: from storage DS-7a5de5af-5bb9-43c3-bbb0-48d0a6991ea6 node DatanodeRegistration(127.0.0.1:38023, datanodeUuid=9111521d-3041-4a5e-b511-c11db812c5e1, infoPort=45785, infoSecurePort=0, ipcPort=36571, storageInfo=lv=-57;cid=testClusterID;nsid=530403037;c=1732148325074), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:47,425 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81 2024-11-21T00:18:47,425 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:47,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:47,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:18:47,757 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:47,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:47,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:47,791 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:47,837 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da with version=8 2024-11-21T00:18:47,838 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/hbase-staging 2024-11-21T00:18:47,841 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:47,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:47,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:47,841 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:47,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:47,841 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:47,841 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:18:47,841 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:47,844 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34855 2024-11-21T00:18:47,846 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34855 connecting to ZooKeeper ensemble=127.0.0.1:58140 2024-11-21T00:18:47,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348550x0, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:47,892 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34855-0x1015ac1e6590000 connected 2024-11-21T00:18:47,984 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:47,986 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:47,990 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on znode that does not yet exist, /0891878329/running 2024-11-21T00:18:47,991 INFO [Time-limited test {}] master.HMaster(525): 
hbase.rootdir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da, hbase.cluster.distributed=false 2024-11-21T00:18:47,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on znode that does not yet exist, /0891878329/acl 2024-11-21T00:18:47,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34855 2024-11-21T00:18:47,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34855 2024-11-21T00:18:47,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34855 2024-11-21T00:18:47,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34855 2024-11-21T00:18:47,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34855 2024-11-21T00:18:48,017 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:48,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:48,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:48,017 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:48,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:48,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:48,017 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:18:48,017 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:48,018 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42853 2024-11-21T00:18:48,019 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42853 connecting to ZooKeeper ensemble=127.0.0.1:58140 2024-11-21T00:18:48,020 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:48,022 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
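Note on the ipc.RpcExecutor entries above: each call queue is instantiated as a bounded java.util.concurrent.LinkedBlockingQueue (maxQueueLength=30) drained by a fixed number of handler threads (handlerCount=3). A minimal plain-JDK sketch of that shape follows; it is not the HBase RpcExecutor itself, and the thread-name prefix is borrowed from the log only for readability.

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Simplified call-queue-plus-handlers pattern; parameters mirror the log:
    // one queue, maxQueueLength=30, handlerCount=3.
    public class CallQueueSketch {
      public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30); // bounded FIFO
        for (int i = 0; i < 3; i++) {
          Thread handler = new Thread(() -> {
            try {
              while (true) {
                callQueue.take().run();           // block until a call arrives, then run it
              }
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt(); // exit quietly on shutdown
            }
          }, "default.FPBQ.Fifo.handler=" + i);
          handler.setDaemon(true);
          handler.start();
        }
        // offer() fails fast once 30 calls are queued, which is how a bounded
        // call queue pushes back on callers instead of growing without limit.
        boolean accepted = callQueue.offer(() -> System.out.println("handled one call"));
        System.out.println("accepted=" + accepted);
        Thread.sleep(100); // give the daemon handlers a moment before the JVM exits
      }
    }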
2024-11-21T00:18:48,036 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428530x0, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:48,037 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428530x0, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on znode that does not yet exist, /0891878329/running 2024-11-21T00:18:48,037 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42853-0x1015ac1e6590001 connected 2024-11-21T00:18:48,037 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:18:48,040 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:18:48,040 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on znode that does not yet exist, /0891878329/master 2024-11-21T00:18:48,041 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on znode that does not yet exist, /0891878329/acl 2024-11-21T00:18:48,046 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42853 2024-11-21T00:18:48,047 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42853 2024-11-21T00:18:48,052 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42853 2024-11-21T00:18:48,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42853 2024-11-21T00:18:48,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42853 2024-11-21T00:18:48,079 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:34855 2024-11-21T00:18:48,080 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0891878329/backup-masters/5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:48,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329/backup-masters 2024-11-21T00:18:48,089 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329/backup-masters 2024-11-21T00:18:48,090 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on existing znode=/0891878329/backup-masters/5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:48,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/0891878329 2024-11-21T00:18:48,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0891878329/master 2024-11-21T00:18:48,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:48,100 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on existing znode=/0891878329/master 2024-11-21T00:18:48,105 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0891878329/backup-masters/5ed4808ef0e6,34855,1732148327840 from backup master directory 2024-11-21T00:18:48,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329/backup-masters 2024-11-21T00:18:48,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0891878329/backup-masters/5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:48,115 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
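Note on the zookeeper.ZKWatcher entries above: they are plain ZooKeeper watch notifications (NodeCreated, NodeDeleted, NodeChildrenChanged) on znodes under /0891878329. A minimal sketch of registering such a watch with the stock ZooKeeper client is shown below; the quorum address and znode path come from the log, and the real ZKWatcher additionally re-registers watches after each one-shot notification.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Watch the master znode the way the ZKWatcher lines above report events.
    public class MasterZNodeWatch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event type=" + event.getType()
                + " state=" + event.getState() + " path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58140", 30_000, watcher);
        // exists() with watch=true registers a one-shot watch: NodeCreated/NodeDeleted
        // on /0891878329/master is delivered to the watcher exactly once.
        zk.exists("/0891878329/master", true);
        Thread.sleep(60_000); // keep the session alive long enough to observe events
        zk.close();
      }
    }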
2024-11-21T00:18:48,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329/backup-masters 2024-11-21T00:18:48,115 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:48,119 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/hbase.id] with ID: a26cfe3b-fea0-49aa-8533-f1a70be2e18f 2024-11-21T00:18:48,119 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/.tmp/hbase.id 2024-11-21T00:18:48,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:18:48,526 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/.tmp/hbase.id]:[hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/hbase.id] 2024-11-21T00:18:48,556 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:48,556 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:18:48,558 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
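Note on the util.FSUtils entries above: the cluster ID is written to .tmp/hbase.id first and then moved onto hbase.id, so readers never see a partially written file. A rough sketch of that write-then-rename pattern with the Hadoop FileSystem API (the NameNode URI and directory come from the log; error handling is trimmed):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Write-to-temp-then-rename, as described by the FSUtils lines above.
    public class ClusterIdWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35003"), conf);
        Path dir = new Path("/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da");
        Path tmp = new Path(dir, ".tmp/hbase.id");
        Path dst = new Path(dir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite any stale temp file
          out.writeBytes("a26cfe3b-fea0-49aa-8533-f1a70be2e18f"); // the ID reported above
        }
        // rename() is atomic on HDFS, so hbase.id appears fully written or not at all.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }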
2024-11-21T00:18:48,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:48,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:48,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:18:49,009 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:49,010 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:18:49,010 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:49,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:18:49,419 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store 2024-11-21T00:18:49,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:18:49,829 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:49,829 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:49,829 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:49,829 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:49,829 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:49,829 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:49,829 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
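Note on the 'master:store' descriptor dumped above: the same attributes can be expressed with the public HBase client builders. The sketch below reproduces the printed settings for the 'info' and 'proc' families only (the 'rs' and 'state' families share the 'proc' shape). Whether the master region builds its descriptor this way is not shown by the log; these are just the equivalent builder calls.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Builder calls matching the attributes printed for 'info' and 'proc'.
    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                              // VERSIONS => '3'
                .setInMemory(true)                              // IN_MEMORY => 'true'
                .setBloomFilterType(BloomType.ROWCOL)           // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)                         // BLOCKSIZE => '8192 B (8KB)'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                              // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)              // BLOOMFILTER => 'ROW'
                .setBlocksize(64 * 1024)                        // BLOCKSIZE => '65536 B (64KB)'
                .build())
            .build();
      }
    }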
2024-11-21T00:18:49,830 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148329829Disabling compacts and flushes for region at 1732148329829Disabling writes for close at 1732148329829Writing region close event to WAL at 1732148329829Closed at 1732148329829 2024-11-21T00:18:49,831 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/.initializing 2024-11-21T00:18:49,831 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/WALs/5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:49,832 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:49,834 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C34855%2C1732148327840, suffix=, logDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/WALs/5ed4808ef0e6,34855,1732148327840, archiveDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/oldWALs, maxLogs=10 2024-11-21T00:18:49,849 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/WALs/5ed4808ef0e6,34855,1732148327840/5ed4808ef0e6%2C34855%2C1732148327840.1732148329834, exclude list is [], retry=0 2024-11-21T00:18:49,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38023,DS-7801d401-6f3f-4630-a52f-d30dd62f106d,DISK] 2024-11-21T00:18:49,863 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/WALs/5ed4808ef0e6,34855,1732148327840/5ed4808ef0e6%2C34855%2C1732148327840.1732148329834 2024-11-21T00:18:49,864 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45785:45785)] 2024-11-21T00:18:49,864 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:49,864 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:49,865 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,865 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,868 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:18:49,870 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:49,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:49,871 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,873 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:18:49,873 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:49,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:49,874 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:18:49,876 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:49,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:49,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:18:49,879 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:49,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:49,880 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,881 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,881 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,882 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,882 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,883 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:49,884 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:49,886 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:49,887 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65476766, jitterRate=-0.024320155382156372}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:49,887 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148329865Initializing all the Stores at 1732148329866 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148329866Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148329868 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148329868Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148329868Cleaning up temporary data from old regions at 1732148329882 (+14 ms)Region opened successfully at 1732148329887 (+5 ms) 2024-11-21T00:18:49,887 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:18:49,892 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fc7c9cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:49,893 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:18:49,893 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:18:49,893 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:18:49,893 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:18:49,896 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 3 msec 2024-11-21T00:18:49,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:18:49,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:18:49,914 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
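Note on the procedure2.RemoteProcedureDispatcher line above: it reports coreThreads=3 with allowCoreThreadTimeOut=true and queueMaxSize=32. In plain java.util.concurrent that pool shape looks like the sketch below; the 60-second idle timeout is a placeholder, since the log does not print the actual keep-alive value.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // Pool shape mirroring the dispatcher line: 3 core threads that may time out
    // when idle, and a bounded work queue of 32. Plain JDK, not the HBase class.
    public class DispatcherPoolSketch {
      public static ThreadPoolExecutor newPool() {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3,                                 // coreThreads=3 (core == max here)
            60, TimeUnit.SECONDS,                 // placeholder idle timeout, not from the log
            new LinkedBlockingQueue<>(32));       // queueMaxSize=32
        pool.allowCoreThreadTimeOut(true);        // matches allowCoreThreadTimeOut=true
        return pool;
      }
    }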
2024-11-21T00:18:49,915 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Unable to get data of znode /0891878329/balancer because node does not exist (not necessarily an error) 2024-11-21T00:18:49,931 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0891878329/balancer already deleted, retry=false 2024-11-21T00:18:49,931 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:18:49,932 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Unable to get data of znode /0891878329/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:18:49,942 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0891878329/normalizer already deleted, retry=false 2024-11-21T00:18:49,942 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:18:50,020 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Unable to get data of znode /0891878329/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:18:50,081 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0891878329/switch/split already deleted, retry=false 2024-11-21T00:18:50,082 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Unable to get data of znode /0891878329/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:18:50,115 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0891878329/switch/merge already deleted, retry=false 2024-11-21T00:18:50,118 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Unable to get data of znode /0891878329/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:18:50,137 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0891878329/snapshot-cleanup already deleted, retry=false 2024-11-21T00:18:50,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0891878329/running 2024-11-21T00:18:50,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:50,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0891878329/running 2024-11-21T00:18:50,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, 
quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:50,147 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,34855,1732148327840, sessionid=0x1015ac1e6590000, setting cluster-up flag (Was=false) 2024-11-21T00:18:50,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:50,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:50,199 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0891878329/flush-table-proc/acquired, /0891878329/flush-table-proc/reached, /0891878329/flush-table-proc/abort 2024-11-21T00:18:50,200 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:50,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:50,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:50,252 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0891878329/online-snapshot/acquired, /0891878329/online-snapshot/reached, /0891878329/online-snapshot/abort 2024-11-21T00:18:50,253 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:50,254 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:18:50,256 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:50,256 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:18:50,256 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
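Note on the balancer.StochasticLoadBalancer line above: it lists the configured cost functions together with the "sum of multiplier of cost functions". As a purely illustrative sketch (not the HBase implementation), an aggregate cost of that kind is a weighted sum in which a zero multiplier disables a function entirely:

    import java.util.function.DoubleSupplier;

    // Weighted sum of per-aspect cost functions; multiplier 0 disables a function.
    public class WeightedCostSketch {
      static double totalCost(double[] multipliers, DoubleSupplier[] costs) {
        double sum = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          if (multipliers[i] <= 0) continue;          // disabled, never evaluated
          sum += multipliers[i] * costs[i].getAsDouble();
        }
        return sum;
      }

      public static void main(String[] args) {
        double[] multipliers = { 500.0, 7.0, 0.0 };   // illustrative weights; third disabled
        DoubleSupplier[] costs = { () -> 0.12, () -> 0.50, () -> 0.99 };
        System.out.println("weighted cost = " + totalCost(multipliers, costs));
      }
    }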
2024-11-21T00:18:50,257 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,34855,1732148327840 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:50,258 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,261 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:50,261 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:18:50,262 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:50,263 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:18:50,264 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148360264 2024-11-21T00:18:50,264 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:18:50,264 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:18:50,264 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:18:50,264 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:18:50,264 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:18:50,264 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:18:50,267 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
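Note on the "Chore ScheduledChore ... is enabled" lines above: each one is a periodic task registered with the ChoreService. A plain-JDK analogy of the LogsCleaner schedule (period=600000 ms) is sketched below; the real ChoreService layers extra bookkeeping on top of this scheduling shape.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Periodic task at a fixed 600000 ms period, mirroring the LogsCleaner chore above.
    public class LogsCleanerChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool =
            Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "LogsCleaner"));
        chorePool.scheduleAtFixedRate(
            () -> System.out.println("scanning oldWALs for expired files ..."), // chore body
            0, 600_000, TimeUnit.MILLISECONDS);   // period=600000, unit=MILLISECONDS
        // The non-daemon scheduler thread keeps this sketch alive, like a long-lived service.
      }
    }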
2024-11-21T00:18:50,267 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:18:50,267 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:18:50,267 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:18:50,267 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:18:50,268 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(746): ClusterId : a26cfe3b-fea0-49aa-8533-f1a70be2e18f 2024-11-21T00:18:50,268 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:18:50,268 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:18:50,268 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:18:50,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:18:50,272 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148330268,5,FailOnTimeoutGroup] 2024-11-21T00:18:50,274 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148330272,5,FailOnTimeoutGroup] 2024-11-21T00:18:50,274 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,274 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:18:50,274 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,274 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
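Note on the master.HMaster line above: reopening regions with very high storeFileRefCount stays disabled unless hbase.regions.recovery.store.file.ref.count is set above 0. A small sketch of that threshold check with the Hadoop Configuration API (the key is the one printed in the log; the non-positive fallback used here is illustrative):

    import org.apache.hadoop.conf.Configuration;

    // Feature stays off unless the operator supplies a positive threshold.
    public class RefCountThresholdSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        int threshold = conf.getInt("hbase.regions.recovery.store.file.ref.count", 0);
        if (threshold > 0) {
          System.out.println("would reopen regions whose store file ref count exceeds " + threshold);
        } else {
          System.out.println("reopening regions with very high storeFileRefCount is disabled");
        }
      }
    }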
2024-11-21T00:18:50,279 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:18:50,279 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:18:50,289 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:18:50,290 DEBUG [RS:0;5ed4808ef0e6:42853 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b681da4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:50,300 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:42853 2024-11-21T00:18:50,300 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:18:50,300 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:18:50,300 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:18:50,301 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,34855,1732148327840 with port=42853, startcode=1732148328016 2024-11-21T00:18:50,301 DEBUG [RS:0;5ed4808ef0e6:42853 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:18:50,302 INFO [HMaster-EventLoopGroup-13-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35789, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:18:50,303 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:50,303 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34855 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:50,304 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da 2024-11-21T00:18:50,304 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35003 2024-11-21T00:18:50,304 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:18:50,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329/rs 2024-11-21T00:18:50,316 DEBUG [RS:0;5ed4808ef0e6:42853 {}] zookeeper.ZKUtil(111): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on existing znode=/0891878329/rs/5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:50,316 WARN [RS:0;5ed4808ef0e6:42853 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
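Note on the /0891878329/rs entries above: the regionserver registers itself as an ephemeral znode named host,port,startcode, which the master's RegionServerTracker picks up as a children change, and which disappears automatically if the server's ZooKeeper session dies. A bare ZooKeeper-client sketch of that registration (paths and quorum from the log; the HBase ZKUtil wrapper and error handling are omitted):

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Ephemeral registration under the /rs parent, as reflected in the log above.
    public class RegionServerRegistrationSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:58140", 30_000, event -> { });
        String znode = "/0891878329/rs/5ed4808ef0e6,42853,1732148328016";
        // The parent /0891878329/rs must already exist (the master creates it at startup).
        zk.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        Thread.sleep(10_000); // while the session is alive, getChildren("/0891878329/rs") lists us
        zk.close();           // closing the session removes the ephemeral node
      }
    }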
2024-11-21T00:18:50,316 INFO [RS:0;5ed4808ef0e6:42853 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:50,316 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:50,316 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,42853,1732148328016] 2024-11-21T00:18:50,322 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:18:50,328 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:18:50,330 INFO [RS:0;5ed4808ef0e6:42853 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:18:50,330 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,331 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:18:50,332 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:18:50,332 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
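Note on the regionserver.MemStoreFlusher line above: globalMemStoreLimit=880 M with a low mark of 836 M is consistent with the low mark being 95% of the global limit, which appears to be the default hbase.regionserver.global.memstore.size.lower.limit. The log does not print the ratio itself, so treat it as an assumption in the sketch below.

    // Arithmetic behind the MemStoreFlusher line above, under the assumed 0.95 ratio.
    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        long globalLimitMb = 880;                       // globalMemStoreLimit=880 M
        double lowerLimitRatio = 0.95;                  // assumed default ratio, not from the log
        long lowMarkMb = (long) (globalLimitMb * lowerLimitRatio);
        System.out.println("low mark = " + lowMarkMb + " M"); // prints 836 M, matching the log
      }
    }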
2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,332 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,333 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:50,333 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:50,333 DEBUG [RS:0;5ed4808ef0e6:42853 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:50,334 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,334 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,334 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,334 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
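ExecutorService(95) above starts one fixed-size thread pool per event type (RS_OPEN_REGION, RS_CLOSE_REGION, RS_LOG_REPLAY_OPS, RS_SNAPSHOT_OPERATIONS, and so on), each with matching core and max sizes. The snippet below is only a generic java.util.concurrent analogue of one such pool, included to make the corePoolSize/maxPoolSize figures concrete; it is not HBase's own executor implementation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RegionServerPoolsSketch {
      // A fixed-size pool shaped like the RS_OPEN_REGION executor logged above (core=1, max=1).
      static ThreadPoolExecutor newFixedPool(String name, int core, int max) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            core, max, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),
            r -> new Thread(r, name));
        pool.allowCoreThreadTimeOut(true); // let idle worker threads exit
        return pool;
      }

      public static void main(String[] args) {
        ThreadPoolExecutor openRegion = newFixedPool("RS_OPEN_REGION", 1, 1);
        openRegion.execute(() -> System.out.println("an open-region task would run here"));
        openRegion.shutdown();
      }
    }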
2024-11-21T00:18:50,334 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,334 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42853,1732148328016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:50,347 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:18:50,347 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42853,1732148328016-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,347 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,347 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.Replication(171): 5ed4808ef0e6,42853,1732148328016 started 2024-11-21T00:18:50,362 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:50,362 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,42853,1732148328016, RpcServer on 5ed4808ef0e6/172.17.0.2:42853, sessionid=0x1015ac1e6590001 2024-11-21T00:18:50,362 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:18:50,362 DEBUG [RS:0;5ed4808ef0e6:42853 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:50,362 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,42853,1732148328016' 2024-11-21T00:18:50,362 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0891878329/flush-table-proc/abort' 2024-11-21T00:18:50,363 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0891878329/flush-table-proc/acquired' 2024-11-21T00:18:50,363 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:18:50,363 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:18:50,363 DEBUG [RS:0;5ed4808ef0e6:42853 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:50,363 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,42853,1732148328016' 2024-11-21T00:18:50,363 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0891878329/online-snapshot/abort' 2024-11-21T00:18:50,364 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0891878329/online-snapshot/acquired' 2024-11-21T00:18:50,364 DEBUG [RS:0;5ed4808ef0e6:42853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:18:50,364 INFO [RS:0;5ed4808ef0e6:42853 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:18:50,364 INFO [RS:0;5ed4808ef0e6:42853 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:18:50,464 INFO [RS:0;5ed4808ef0e6:42853 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:50,466 INFO [RS:0;5ed4808ef0e6:42853 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C42853%2C1732148328016, suffix=, logDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016, archiveDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/oldWALs, maxLogs=10 2024-11-21T00:18:50,478 DEBUG [RS:0;5ed4808ef0e6:42853 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466, exclude list is [], retry=0 2024-11-21T00:18:50,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38023,DS-7801d401-6f3f-4630-a52f-d30dd62f106d,DISK] 2024-11-21T00:18:50,482 INFO [RS:0;5ed4808ef0e6:42853 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 2024-11-21T00:18:50,482 DEBUG [RS:0;5ed4808ef0e6:42853 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45785:45785)] 2024-11-21T00:18:50,670 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:18:50,671 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da 2024-11-21T00:18:50,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:18:51,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:51,078 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:51,079 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:51,080 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:51,082 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:51,082 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,082 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:51,084 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:51,084 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:51,086 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:51,086 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,087 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:51,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740 2024-11-21T00:18:51,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740 2024-11-21T00:18:51,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:51,089 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:51,089 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:51,091 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:51,093 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:51,094 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66756882, jitterRate=-0.005244940519332886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:51,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148331077Initializing all the Stores at 1732148331078 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148331078Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148331078Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148331078Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148331078Cleaning up temporary data from old regions at 1732148331089 (+11 ms)Region opened successfully at 1732148331094 (+5 ms) 2024-11-21T00:18:51,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:51,095 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:51,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:51,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:51,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:51,095 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:51,095 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148331095Disabling compacts and flushes for region at 1732148331095Disabling writes for close at 1732148331095Writing region close event to WAL at 1732148331095Closed at 1732148331095 2024-11-21T00:18:51,096 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:51,096 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:18:51,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:18:51,098 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:51,099 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:18:51,250 DEBUG [5ed4808ef0e6:34855 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:18:51,251 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:51,252 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,42853,1732148328016, state=OPENING 2024-11-21T00:18:51,299 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:18:51,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:51,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:18:51,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0891878329/meta-region-server: CHANGED 2024-11-21T00:18:51,310 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:51,310 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0891878329/meta-region-server: CHANGED 2024-11-21T00:18:51,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,42853,1732148328016}] 2024-11-21T00:18:51,464 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:18:51,465 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52929, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:18:51,469 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:18:51,469 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:51,469 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:18:51,471 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C42853%2C1732148328016.meta, suffix=.meta, logDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016, archiveDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/oldWALs, maxLogs=10 2024-11-21T00:18:51,482 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.meta.1732148331471.meta, exclude list is [], retry=0 2024-11-21T00:18:51,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38023,DS-7801d401-6f3f-4630-a52f-d30dd62f106d,DISK] 2024-11-21T00:18:51,486 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.meta.1732148331471.meta 2024-11-21T00:18:51,487 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45785:45785)] 2024-11-21T00:18:51,487 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:51,487 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:51,487 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:51,487 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:18:51,488 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:18:51,488 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:18:51,488 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:51,488 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:18:51,488 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:18:51,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:51,490 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:51,490 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,490 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,491 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:51,491 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:51,491 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,492 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:51,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:51,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:51,493 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:51,494 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:51,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:51,494 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:51,495 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740 2024-11-21T00:18:51,496 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740 2024-11-21T00:18:51,497 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:51,497 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:51,498 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
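The HStore/CompactionConfiguration entries above, together with the earlier HRegion(7572) line, enumerate the hbase:meta column families (info, ns, rep_barrier, table) with VERSIONS => '3', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and an 8 KB block size. For a user table, the same family-level settings are expressed through the descriptor builders; a minimal sketch, using a hypothetical table name "demo":

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) {
        // Column family tuned like the meta 'info' family echoed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }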
2024-11-21T00:18:51,499 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:51,500 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62097361, jitterRate=-0.07467721402645111}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:51,500 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:18:51,501 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148331488Writing region info on filesystem at 1732148331488Initializing all the Stores at 1732148331489 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148331489Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148331489Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148331489Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148331489Cleaning up temporary data from old regions at 1732148331497 (+8 ms)Running coprocessor post-open hooks at 1732148331500 (+3 ms)Region opened successfully at 1732148331501 (+1 ms) 2024-11-21T00:18:51,502 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148331463 2024-11-21T00:18:51,504 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:18:51,504 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:18:51,505 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:51,506 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,42853,1732148328016, state=OPEN 2024-11-21T00:18:51,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0891878329/meta-region-server 2024-11-21T00:18:51,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0891878329/meta-region-server 2024-11-21T00:18:51,572 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:51,572 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0891878329/meta-region-server: CHANGED 2024-11-21T00:18:51,572 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0891878329/meta-region-server: CHANGED 2024-11-21T00:18:51,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:18:51,576 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,42853,1732148328016 in 262 msec 2024-11-21T00:18:51,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:18:51,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 478 msec 2024-11-21T00:18:51,579 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:51,579 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:18:51,580 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:51,580 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42853,1732148328016, seqNum=-1] 2024-11-21T00:18:51,580 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:51,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55273, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:51,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3300 sec 2024-11-21T00:18:51,587 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148331587, completionTime=-1 
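Once pid=3 completes and the meta location is published (ConnectionUtils(555) reports region=hbase:meta,,1.1588230740 on 5ed4808ef0e6,42853), any client can resolve that location through a RegionLocator. A minimal sketch of the lookup against a running cluster, assuming default client configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves something like: region=hbase:meta,,1.1588230740, hostname=..., seqNum=...
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println(loc);
        }
      }
    }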
2024-11-21T00:18:51,587 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:18:51,588 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:18:51,589 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:18:51,589 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148391589 2024-11-21T00:18:51,589 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148451589 2024-11-21T00:18:51,590 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-21T00:18:51,590 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34855,1732148327840-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:51,590 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34855,1732148327840-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:51,590 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34855,1732148327840-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:51,590 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:34855, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:51,590 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:51,590 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:51,592 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:18:51,594 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.479sec 2024-11-21T00:18:51,594 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:18:51,594 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:18:51,594 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:18:51,595 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
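HMaster(1239) above marks the end of master initialization, after which the status chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore) are scheduled. A short client-side sketch of the kind of check a test might run at this point, reading cluster metrics and balancer state through Admin; none of it is taken from this test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MasterUpCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
          System.out.println("balancer enabled: " + admin.isBalancerEnabled());
        }
      }
    }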
2024-11-21T00:18:51,595 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:18:51,595 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34855,1732148327840-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:51,595 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34855,1732148327840-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:18:51,597 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:18:51,598 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:18:51,598 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34855,1732148327840-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:51,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36eba5d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:51,672 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34855,-1 for getting cluster id 2024-11-21T00:18:51,672 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:51,674 DEBUG [HMaster-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a26cfe3b-fea0-49aa-8533-f1a70be2e18f' 2024-11-21T00:18:51,674 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:51,674 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a26cfe3b-fea0-49aa-8533-f1a70be2e18f" 2024-11-21T00:18:51,675 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42b4e5e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:51,675 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34855,-1] 2024-11-21T00:18:51,675 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:51,675 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:51,676 INFO [HMaster-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41594, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:51,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16120977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:51,678 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:51,679 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42853,1732148328016, seqNum=-1] 2024-11-21T00:18:51,680 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:51,685 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51268, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:51,690 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:51,692 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:58140 2024-11-21T00:18:51,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:51,711 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015ac1e6590002 connected 2024-11-21T00:18:51,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.log.dir so I do NOT create it in target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991 2024-11-21T00:18:51,737 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:18:51,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.tmp.dir so I do NOT create it in target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991 2024-11-21T00:18:51,737 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.tmp.dir Erasing configuration value by system value. 
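HBaseTestingUtil(877) reports the first minicluster as up, and the following entries show the utility preparing directories and system properties for a second one. A sketch of how a test typically drives this utility; the class names match the ones logged here (HBaseTestingUtil, StartMiniClusterOption), but the exact builder and method signatures are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Builder fields mirror the StartMiniClusterOption{numMasters=1, numRegionServers=1,
        // numDataNodes=1, numZkServers=1, ...} echoed a few lines below in the log.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option); // "Minicluster is up" once the master and RS report in
        try {
          // Test code would run against util.getConnection() here.
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }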
2024-11-21T00:18:51,737 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991 2024-11-21T00:18:51,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:18:51,737 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/cluster_f839d63c-e580-846d-0cf4-5a2d6877035b, deleteOnExit=true 2024-11-21T00:18:51,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:18:51,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/test.cache.data in system properties and HBase conf 2024-11-21T00:18:51,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:18:51,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:18:51,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:18:51,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:18:51,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:18:51,738 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:18:51,738 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:18:51,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:18:51,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:18:51,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:18:51,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:18:52,338 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:52,343 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:52,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:52,344 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:52,344 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:18:52,344 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:52,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@346e4b0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:52,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@221cfbfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:52,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@32cecb8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/java.io.tmpdir/jetty-localhost-35841-hadoop-hdfs-3_4_1-tests_jar-_-any-10621040894150579930/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:18:52,459 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6003bd66{HTTP/1.1, (http/1.1)}{localhost:35841} 2024-11-21T00:18:52,459 INFO [Time-limited test {}] server.Server(415): Started @67009ms 2024-11-21T00:18:52,751 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:18:52,756 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:18:52,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:18:52,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:18:52,765 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:18:52,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c54674c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:18:52,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25ed74cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:18:52,894 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6fbfa30f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/java.io.tmpdir/jetty-localhost-37455-hadoop-hdfs-3_4_1-tests_jar-_-any-9974584513777314421/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:18:52,894 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27bec7a{HTTP/1.1, (http/1.1)}{localhost:37455} 2024-11-21T00:18:52,894 INFO [Time-limited test {}] server.Server(415): Started @67444ms 2024-11-21T00:18:52,896 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:18:53,807 WARN [Thread-949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/cluster_f839d63c-e580-846d-0cf4-5a2d6877035b/data/data1/current/BP-1240653071-172.17.0.2-1732148331766/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:53,808 WARN [Thread-950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/cluster_f839d63c-e580-846d-0cf4-5a2d6877035b/data/data2/current/BP-1240653071-172.17.0.2-1732148331766/current, will proceed with Du for space computation calculation, 2024-11-21T00:18:53,837 WARN [Thread-937 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:18:53,846 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ab7de0469384903 with lease ID 0x5fd1b2eae625c91e: Processing first storage report for DS-9c71e35a-568c-4334-b580-a011bf4b13a0 from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=d593947b-8443-4d6c-bf83-d72092d08959, infoPort=40589, infoSecurePort=0, ipcPort=37721, storageInfo=lv=-57;cid=testClusterID;nsid=892160575;c=1732148331766) 2024-11-21T00:18:53,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ab7de0469384903 with lease ID 0x5fd1b2eae625c91e: from storage DS-9c71e35a-568c-4334-b580-a011bf4b13a0 node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=d593947b-8443-4d6c-bf83-d72092d08959, infoPort=40589, infoSecurePort=0, ipcPort=37721, storageInfo=lv=-57;cid=testClusterID;nsid=892160575;c=1732148331766), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:53,846 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2ab7de0469384903 with lease ID 0x5fd1b2eae625c91e: Processing first storage report for DS-0053bccd-0800-466d-948b-c1c652ae4abd from datanode DatanodeRegistration(127.0.0.1:35783, datanodeUuid=d593947b-8443-4d6c-bf83-d72092d08959, infoPort=40589, infoSecurePort=0, ipcPort=37721, storageInfo=lv=-57;cid=testClusterID;nsid=892160575;c=1732148331766) 2024-11-21T00:18:53,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2ab7de0469384903 with lease ID 0x5fd1b2eae625c91e: from storage DS-0053bccd-0800-466d-948b-c1c652ae4abd node DatanodeRegistration(127.0.0.1:35783, datanodeUuid=d593947b-8443-4d6c-bf83-d72092d08959, infoPort=40589, infoSecurePort=0, ipcPort=37721, storageInfo=lv=-57;cid=testClusterID;nsid=892160575;c=1732148331766), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:18:53,912 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991 
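The StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1, createRootDir=false, createWALDir=false} record above, followed by the DFS and Jetty startup and the block reports, corresponds to a second cluster being brought up by the testing utility. A sketch of how such a startup is normally written, assuming the builder methods mirror the field names printed in that record and that HBaseTestingUtil exposes startMiniCluster(StartMiniClusterOption) and shutdownMiniCluster() like its predecessor:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Mirrors the logged option: 1 master, 1 region server, 1 datanode,
        // 1 ZK server, root dir and WAL dir not pre-created.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .createRootDir(false)
            .createWALDir(false)
            .build();
        util.startMiniCluster(option);   // drives the DFS/Jetty startup logged above
        try {
          // ... test body against util.getConnection() ...
        } finally {
          util.shutdownMiniCluster();    // tears down HBase, DFS and ZooKeeper
        }
      }
    }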
2024-11-21T00:18:53,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:53,915 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:53,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:18:54,325 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b with version=8 2024-11-21T00:18:54,325 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/hbase-staging 2024-11-21T00:18:54,327 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:54,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:54,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:54,328 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:54,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:54,328 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:54,328 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:18:54,328 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:54,329 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36869 2024-11-21T00:18:54,330 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36869 connecting to ZooKeeper ensemble=127.0.0.1:58140 2024-11-21T00:18:54,352 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:368690x0, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:54,356 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36869-0x1015ac1e6590003 connected 2024-11-21T00:18:54,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
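The RpcExecutor records above (handlerCount=3, maxQueueLength=30, and the read/write split inside priority.RWQ.Fifo) are shaped by standard hbase-site keys. Whether this particular run sets them explicitly is not visible in the log; the sketch below only illustrates the usual knobs under that assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueConfigSketch {
      // Illustrative only: common keys behind the RpcExecutor/RWQueueRpcExecutor
      // records above. Whether this test run tunes them is not shown in the log.
      static Configuration rpcTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regionserver.handler.count", 3);           // cf. handlerCount=3 above
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f); // carve out read queues/handlers
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f); // cf. scanQueues=0 above
        return conf;
      }
    }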
2024-11-21T00:18:54,427 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:54,429 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on znode that does not yet exist, /1-1464671649/running 2024-11-21T00:18:54,430 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b, hbase.cluster.distributed=false 2024-11-21T00:18:54,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on znode that does not yet exist, /1-1464671649/acl 2024-11-21T00:18:54,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36869 2024-11-21T00:18:54,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36869 2024-11-21T00:18:54,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36869 2024-11-21T00:18:54,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36869 2024-11-21T00:18:54,441 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36869 2024-11-21T00:18:54,460 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:18:54,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:54,461 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:54,461 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:18:54,461 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:18:54,461 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:18:54,461 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:18:54,461 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:18:54,465 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35605 2024-11-21T00:18:54,466 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:35605 connecting to ZooKeeper ensemble=127.0.0.1:58140 2024-11-21T00:18:54,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:54,469 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:54,484 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:356050x0, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:54,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:356050x0, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on znode that does not yet exist, /1-1464671649/running 2024-11-21T00:18:54,484 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:18:54,485 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35605-0x1015ac1e6590004 connected 2024-11-21T00:18:54,489 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:18:54,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on znode that does not yet exist, /1-1464671649/master 2024-11-21T00:18:54,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on znode that does not yet exist, /1-1464671649/acl 2024-11-21T00:18:54,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35605 2024-11-21T00:18:54,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35605 2024-11-21T00:18:54,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35605 2024-11-21T00:18:54,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35605 2024-11-21T00:18:54,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35605 2024-11-21T00:18:54,520 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:36869 2024-11-21T00:18:54,521 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-1464671649/backup-masters/5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:54,531 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649/backup-masters 2024-11-21T00:18:54,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/1-1464671649/backup-masters 2024-11-21T00:18:54,531 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on existing znode=/1-1464671649/backup-masters/5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:54,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:54,542 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1464671649/master 2024-11-21T00:18:54,542 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:54,542 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on existing znode=/1-1464671649/master 2024-11-21T00:18:54,543 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-1464671649/backup-masters/5ed4808ef0e6,36869,1732148334327 from backup master directory 2024-11-21T00:18:54,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1464671649/backup-masters/5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:54,552 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649/backup-masters 2024-11-21T00:18:54,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649/backup-masters 2024-11-21T00:18:54,552 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
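At this point the region server is connected to ZooKeeper and the master's backup-master znode is being deleted as part of the active-master handoff shown in the next records. A test normally verifies that election through the Admin API; a sketch, assuming getAdmin() is available on HBaseTestingUtil as it is on the older HBaseTestingUtility (the Admin is utility-managed, so it is not closed here):

    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class ActiveMasterCheckSketch {
      // Sketch: confirm from test code that the election visible in the
      // surrounding records completed.
      static void printMasters(HBaseTestingUtil util) throws Exception {
        Admin admin = util.getAdmin();
        ClusterMetrics metrics = admin.getClusterMetrics(
            EnumSet.of(ClusterMetrics.Option.MASTER, ClusterMetrics.Option.BACKUP_MASTERS));
        System.out.println("active master : " + metrics.getMasterName());
        System.out.println("backup masters: " + metrics.getBackupMasterNames());
      }
    }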
2024-11-21T00:18:54,552 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:54,555 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/hbase.id] with ID: 281f17dd-ba1b-4202-86e4-c12531773b29 2024-11-21T00:18:54,555 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/.tmp/hbase.id 2024-11-21T00:18:54,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:18:54,961 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/.tmp/hbase.id]:[hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/hbase.id] 2024-11-21T00:18:54,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:18:54,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:18:54,975 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:18:54,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:54,989 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:54,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:18:55,395 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:55,396 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:18:55,396 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:55,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:18:55,809 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store 2024-11-21T00:18:55,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:18:56,217 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:56,218 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:18:56,218 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
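The master:store descriptor printed above (families info, proc, rs and state, with ROW_INDEX_V1 encoding, ROWCOL bloom filter and 8 KB blocks on info) is assembled internally by MasterRegion. For orientation, a sketch of how those same attributes map onto the public descriptor-builder API; the table name is invented and no table is created:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      // Rough public-API equivalent of the 'info' family in the master:store
      // descriptor logged above; purely illustrative.
      static TableDescriptor infoFamilySketch() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("store_like_demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
      }
    }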
2024-11-21T00:18:56,218 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:56,218 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:18:56,218 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:56,218 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:18:56,218 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148336217Disabling compacts and flushes for region at 1732148336217Disabling writes for close at 1732148336218 (+1 ms)Writing region close event to WAL at 1732148336218Closed at 1732148336218 2024-11-21T00:18:56,220 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/.initializing 2024-11-21T00:18:56,220 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/WALs/5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:56,222 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:56,224 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C36869%2C1732148334327, suffix=, logDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/WALs/5ed4808ef0e6,36869,1732148334327, archiveDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/oldWALs, maxLogs=10 2024-11-21T00:18:56,243 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/WALs/5ed4808ef0e6,36869,1732148334327/5ed4808ef0e6%2C36869%2C1732148334327.1732148336224, exclude list is [], retry=0 2024-11-21T00:18:56,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35783,DS-9c71e35a-568c-4334-b580-a011bf4b13a0,DISK] 2024-11-21T00:18:56,279 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/WALs/5ed4808ef0e6,36869,1732148334327/5ed4808ef0e6%2C36869%2C1732148334327.1732148336224 2024-11-21T00:18:56,279 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40589:40589)] 2024-11-21T00:18:56,279 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:56,280 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:56,280 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,280 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:18:56,286 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:56,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:56,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,289 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 
columnFamilyName proc 2024-11-21T00:18:56,289 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:56,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:56,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:18:56,298 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:56,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:56,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:18:56,302 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:56,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:56,302 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,303 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,304 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,306 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,306 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,307 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:56,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:18:56,308 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T00:18:56,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:18:56,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver Metrics about HBase RegionObservers 2024-11-21T00:18:56,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:18:56,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T00:18:56,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:18:56,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T00:18:56,311 DEBUG 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:18:56,317 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:56,318 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68395200, jitterRate=0.01916790008544922}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:56,318 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148336280Initializing all the Stores at 1732148336281 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148336281Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148336284 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148336284Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148336284Cleaning up temporary data from old regions at 1732148336306 (+22 ms)Region opened successfully at 1732148336318 (+12 ms) 2024-11-21T00:18:56,320 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:18:56,322 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:18:56,324 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d749a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:56,325 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location 
available on zookeeper, skip migrating... 2024-11-21T00:18:56,326 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:18:56,326 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:18:56,326 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:18:56,327 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:18:56,327 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:18:56,327 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:18:56,342 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T00:18:56,343 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Unable to get data of znode /1-1464671649/balancer because node does not exist (not necessarily an error) 2024-11-21T00:18:56,394 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1464671649/balancer already deleted, retry=false 2024-11-21T00:18:56,394 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:18:56,395 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Unable to get data of znode /1-1464671649/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:18:56,404 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1464671649/normalizer already deleted, retry=false 2024-11-21T00:18:56,405 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:18:56,413 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Unable to get data of znode /1-1464671649/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:18:56,425 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1464671649/switch/split already deleted, retry=false 2024-11-21T00:18:56,426 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Unable to get data of znode /1-1464671649/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:18:56,436 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1464671649/switch/merge already deleted, retry=false 2024-11-21T00:18:56,439 DEBUG 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Unable to get data of znode /1-1464671649/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:18:56,446 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1464671649/snapshot-cleanup already deleted, retry=false 2024-11-21T00:18:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1464671649/running 2024-11-21T00:18:56,458 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1464671649/running 2024-11-21T00:18:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:56,458 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:56,461 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,36869,1732148334327, sessionid=0x1015ac1e6590003, setting cluster-up flag (Was=false) 2024-11-21T00:18:56,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:56,499 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:56,531 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1464671649/flush-table-proc/acquired, /1-1464671649/flush-table-proc/reached, /1-1464671649/flush-table-proc/abort 2024-11-21T00:18:56,532 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:56,552 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:56,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:56,599 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1464671649/online-snapshot/acquired, /1-1464671649/online-snapshot/reached, /1-1464671649/online-snapshot/abort 2024-11-21T00:18:56,601 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:56,603 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:18:56,614 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:56,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:18:56,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:18:56,615 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,36869,1732148334327 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:56,617 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-21T00:18:56,621 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(746): ClusterId : 281f17dd-ba1b-4202-86e4-c12531773b29 2024-11-21T00:18:56,621 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:18:56,629 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:56,629 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:18:56,631 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:56,631 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:18:56,632 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:18:56,632 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:18:56,643 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:18:56,643 DEBUG [RS:0;5ed4808ef0e6:35605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@726e2a38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:18:56,672 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148366672 2024-11-21T00:18:56,673 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:18:56,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:18:56,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:18:56,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:18:56,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:18:56,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:18:56,676 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,684 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:35605 2024-11-21T00:18:56,684 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:18:56,684 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:18:56,684 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:18:56,685 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,36869,1732148334327 with port=35605, startcode=1732148334460 2024-11-21T00:18:56,685 DEBUG [RS:0;5ed4808ef0e6:35605 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:18:56,700 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:18:56,701 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:18:56,701 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:18:56,701 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:18:56,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:18:56,717 INFO [HMaster-EventLoopGroup-15-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45135, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:18:56,717 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36869 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:18:56,718 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-21T00:18:56,718 WARN [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-21T00:18:56,720 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:18:56,720 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:18:56,725 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148336720,5,FailOnTimeoutGroup] 2024-11-21T00:18:56,725 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148336725,5,FailOnTimeoutGroup] 2024-11-21T00:18:56,725 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,725 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:18:56,725 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,725 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
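The reportForDuty exchange above shows the region server treating ServerNotRunningYetException as a transient condition: it logs a warning, sleeps 100 ms, and retries until the master's RPC services are up. The following is an illustrative sketch of that retry pattern only, not the actual HRegionServer code; the register attempt is a hypothetical BooleanSupplier and everything else is plain JDK.

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    public class RetryUntilMasterUp {

        // Keep calling the supplied registration attempt until it succeeds, sleeping a fixed
        // delay between attempts, the way the log shows reportForDuty being retried while the
        // master is "not running yet".
        static void registerWithRetry(BooleanSupplier attempt, long sleepMs, int maxAttempts)
                throws InterruptedException {
            for (int i = 1; i <= maxAttempts; i++) {
                if (attempt.getAsBoolean()) {
                    System.out.println("registered on attempt " + i);
                    return;
                }
                System.out.println("attempt " + i + " failed; sleeping " + sleepMs + " ms and then retrying");
                TimeUnit.MILLISECONDS.sleep(sleepMs);
            }
            throw new IllegalStateException("master never became available");
        }

        public static void main(String[] args) throws InterruptedException {
            // Simulated master that only accepts registration from the third attempt on.
            int[] calls = {0};
            registerWithRetry(() -> ++calls[0] >= 3, 100, 50);
        }
    }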
2024-11-21T00:18:56,820 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,36869,1732148334327 with port=35605, startcode=1732148334460 2024-11-21T00:18:56,821 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36869 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:56,821 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36869 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:56,823 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b 2024-11-21T00:18:56,823 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34125 2024-11-21T00:18:56,823 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:18:56,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649/rs 2024-11-21T00:18:56,867 DEBUG [RS:0;5ed4808ef0e6:35605 {}] zookeeper.ZKUtil(111): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on existing znode=/1-1464671649/rs/5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:56,867 WARN [RS:0;5ed4808ef0e6:35605 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:18:56,867 INFO [RS:0;5ed4808ef0e6:35605 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:56,867 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:56,885 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,35605,1732148334460] 2024-11-21T00:18:56,902 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:18:56,916 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:18:56,917 INFO [RS:0;5ed4808ef0e6:35605 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:18:56,917 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
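The MemStoreFlusher line above reports a global memstore limit of 880 M with a low-water mark of 836 M, i.e. the low mark sits at 95% of the limit. A minimal sketch of that arithmetic follows; the 2200 MB heap and 0.4 memstore fraction are purely illustrative assumptions chosen so the numbers come out as in the log, not a statement about this test's actual configuration.

    public class MemStoreLimits {

        // Compute a global limit as a fraction of the given heap size.
        static long globalLimit(long maxHeapBytes, double memstoreFraction) {
            return (long) (maxHeapBytes * memstoreFraction);
        }

        // The low-water mark at which flushing stops, as a fraction of the limit.
        static long lowWaterMark(long globalLimitBytes, double lowMarkFraction) {
            return (long) (globalLimitBytes * lowMarkFraction);
        }

        public static void main(String[] args) {
            long mb = 1024L * 1024L;
            // Illustrative numbers only: a 2200 MB heap with a 0.4 fraction gives the 880 MB
            // limit seen in the log; 0.95 of that is the 836 MB low mark.
            long limit = globalLimit(2200 * mb, 0.4);
            long lowMark = lowWaterMark(limit, 0.95);
            System.out.printf("globalMemStoreLimit=%d M, lowMark=%d M%n", limit / mb, lowMark / mb);
        }
    }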
2024-11-21T00:18:56,924 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:18:56,925 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:18:56,925 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:56,926 DEBUG [RS:0;5ed4808ef0e6:35605 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:18:56,943 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
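The block of executor.ExecutorService lines above is the region server spinning up one named, bounded thread pool per event type (RS_OPEN_REGION, RS_LOG_REPLAY_OPS, and so on), each with its own core and maximum size. Below is a rough JDK-only sketch of that naming-plus-sizing pattern; the pool names and sizes are taken from the log, but the helper itself is hypothetical and is not HBase's ExecutorService class.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class NamedPools {

        // Create a pool whose worker threads carry the service name, logging the same
        // name/corePoolSize/maxPoolSize triple that appears in the startup output.
        static ThreadPoolExecutor startExecutorService(String name, int corePoolSize, int maxPoolSize) {
            AtomicInteger counter = new AtomicInteger();
            ThreadFactory factory = r -> new Thread(r, name + "-" + counter.incrementAndGet());
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    corePoolSize, maxPoolSize, 60L, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<>(), factory);
            System.out.printf("Starting executor service name=%s, corePoolSize=%d, maxPoolSize=%d%n",
                    name, corePoolSize, maxPoolSize);
            return pool;
        }

        public static void main(String[] args) {
            ThreadPoolExecutor openRegion = startExecutorService("RS_OPEN_REGION", 1, 1);
            ThreadPoolExecutor logReplay = startExecutorService("RS_LOG_REPLAY_OPS", 2, 2);
            openRegion.shutdown();
            logReplay.shutdown();
        }
    }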
2024-11-21T00:18:56,943 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,943 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,943 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,943 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,943 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35605,1732148334460-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:56,974 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:18:56,974 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35605,1732148334460-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,975 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:56,975 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.Replication(171): 5ed4808ef0e6,35605,1732148334460 started 2024-11-21T00:18:56,991 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:18:56,997 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
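Each ChoreService line above registers a periodic task with a name, a period, and a time unit. The sketch below shows that scheduling pattern on plain java.util.concurrent; HBase's ChoreService/ScheduledChore API is not reproduced here, and the chore body is a placeholder.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {

        // Schedule a named task at a fixed period, mirroring the "Chore ScheduledChore
        // name=..., period=..., unit=... is enabled." lines.
        static void scheduleChore(ScheduledExecutorService service, String name,
                                  Runnable task, long period, TimeUnit unit) {
            service.scheduleAtFixedRate(task, period, period, unit);
            System.out.printf("Chore ScheduledChore name=%s, period=%d, unit=%s is enabled.%n",
                    name, period, unit);
        }

        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService service = Executors.newScheduledThreadPool(1);
            scheduleChore(service, "CompactionChecker",
                    () -> System.out.println("checking for compactions"), 1000, TimeUnit.MILLISECONDS);
            TimeUnit.SECONDS.sleep(3); // let the chore fire a couple of times
            service.shutdownNow();
        }
    }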
2024-11-21T00:18:56,997 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,35605,1732148334460, RpcServer on 5ed4808ef0e6/172.17.0.2:35605, sessionid=0x1015ac1e6590004 2024-11-21T00:18:56,997 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:18:56,997 DEBUG [RS:0;5ed4808ef0e6:35605 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:56,997 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,35605,1732148334460' 2024-11-21T00:18:56,997 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1464671649/flush-table-proc/abort' 2024-11-21T00:18:57,006 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1464671649/flush-table-proc/acquired' 2024-11-21T00:18:57,009 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:18:57,009 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:18:57,009 DEBUG [RS:0;5ed4808ef0e6:35605 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:57,009 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,35605,1732148334460' 2024-11-21T00:18:57,009 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1464671649/online-snapshot/abort' 2024-11-21T00:18:57,010 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1464671649/online-snapshot/acquired' 2024-11-21T00:18:57,012 DEBUG [RS:0;5ed4808ef0e6:35605 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:18:57,012 INFO [RS:0;5ed4808ef0e6:35605 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:18:57,012 INFO [RS:0;5ed4808ef0e6:35605 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:18:57,035 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:57,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:57,037 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:18:57,116 INFO [RS:0;5ed4808ef0e6:35605 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:18:57,117 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:18:57,117 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b 2024-11-21T00:18:57,126 INFO [RS:0;5ed4808ef0e6:35605 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C35605%2C1732148334460, suffix=, logDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460, archiveDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/oldWALs, maxLogs=10 2024-11-21T00:18:57,153 DEBUG [RS:0;5ed4808ef0e6:35605 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460/5ed4808ef0e6%2C35605%2C1732148334460.1732148337127, exclude list is [], retry=0 2024-11-21T00:18:57,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35783,DS-9c71e35a-568c-4334-b580-a011bf4b13a0,DISK] 2024-11-21T00:18:57,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:18:57,185 INFO 
[RS:0;5ed4808ef0e6:35605 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460/5ed4808ef0e6%2C35605%2C1732148334460.1732148337127 2024-11-21T00:18:57,185 DEBUG [RS:0;5ed4808ef0e6:35605 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40589:40589)] 2024-11-21T00:18:57,562 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:57,576 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:57,579 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:57,579 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:57,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:57,580 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:57,582 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:57,582 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:57,582 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:57,583 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:57,585 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:57,585 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:57,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:57,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:57,592 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:57,592 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:57,595 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:57,596 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:57,597 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740 2024-11-21T00:18:57,598 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740 2024-11-21T00:18:57,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:57,600 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:57,601 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:18:57,603 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:57,612 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:57,613 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63876599, jitterRate=-0.048164501786231995}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:57,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148337563Initializing all the Stores at 1732148337564 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148337564Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148337576 (+12 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148337576Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148337576Cleaning up temporary data from old regions at 1732148337600 (+24 ms)Region opened successfully at 1732148337613 (+13 ms) 2024-11-21T00:18:57,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:18:57,613 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:18:57,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:18:57,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:18:57,613 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:18:57,614 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:18:57,614 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148337613Disabling compacts and flushes for region at 1732148337613Disabling writes for close at 1732148337613Writing region close event to WAL at 1732148337614 (+1 ms)Closed at 1732148337614 2024-11-21T00:18:57,615 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:57,615 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:18:57,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:18:57,621 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:57,623 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:18:57,774 DEBUG [5ed4808ef0e6:36869 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:18:57,775 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:57,778 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,35605,1732148334460, state=OPENING 2024-11-21T00:18:57,789 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:18:57,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:57,810 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:18:57,812 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1464671649/meta-region-server: CHANGED 2024-11-21T00:18:57,816 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/1-1464671649/meta-region-server: CHANGED 2024-11-21T00:18:57,816 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:18:57,817 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35605,1732148334460}] 2024-11-21T00:18:57,981 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:18:57,997 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39021, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:18:58,017 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:18:58,017 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:18:58,017 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:18:58,033 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C35605%2C1732148334460.meta, suffix=.meta, logDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460, archiveDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/oldWALs, maxLogs=10 2024-11-21T00:18:58,059 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460/5ed4808ef0e6%2C35605%2C1732148334460.meta.1732148338034.meta, exclude list is [], retry=0 2024-11-21T00:18:58,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35783,DS-9c71e35a-568c-4334-b580-a011bf4b13a0,DISK] 2024-11-21T00:18:58,100 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460/5ed4808ef0e6%2C35605%2C1732148334460.meta.1732148338034.meta 2024-11-21T00:18:58,108 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40589:40589)] 2024-11-21T00:18:58,108 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:58,109 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:58,109 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:18:58,109 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:18:58,109 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:18:58,109 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:18:58,109 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:58,110 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:18:58,110 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:18:58,132 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:18:58,149 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:18:58,149 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:58,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:58,153 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:18:58,161 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:18:58,161 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:58,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:58,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:18:58,170 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:18:58,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:58,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:58,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:18:58,172 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:18:58,172 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:58,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:18:58,178 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:18:58,180 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740 2024-11-21T00:18:58,189 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740 2024-11-21T00:18:58,194 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:18:58,195 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:18:58,195 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
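The wal.AbstractFSWAL lines earlier in this startup report a roll size (10 KB) and a cap on retained logs (maxLogs=10), and the WAL file names in the log end in a millisecond timestamp; once the current WAL grows past the roll size a new timestamped file is started, as the "New WAL .../...1732148337127" line shows. Below is a deliberately simplified, local-filesystem-only sketch of that rolling behaviour: it writes ordinary files via java.nio rather than HDFS, the names are illustrative, and the extra counter suffix exists only to keep names unique in this sketch.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardOpenOption;

    public class RollingWalSketch {
        private final Path dir;
        private final String prefix;
        private final long rollSizeBytes;
        private long rollCount = 0;
        private Path current;

        RollingWalSketch(Path dir, String prefix, long rollSizeBytes) throws IOException {
            this.dir = Files.createDirectories(dir);
            this.prefix = prefix;
            this.rollSizeBytes = rollSizeBytes;
            roll(); // start the first log file
        }

        // Start a new log file named <prefix>.<timestamp>-<n>.
        private void roll() throws IOException {
            current = dir.resolve(prefix + "." + System.currentTimeMillis() + "-" + rollCount++);
            Files.createFile(current);
            System.out.println("New WAL " + current);
        }

        // Append one entry, rolling first if the current file has passed the roll size.
        void append(String entry) throws IOException {
            if (Files.size(current) >= rollSizeBytes) {
                roll();
            }
            Files.writeString(current, entry + System.lineSeparator(), StandardCharsets.UTF_8,
                    StandardOpenOption.APPEND);
        }

        public static void main(String[] args) throws IOException {
            RollingWalSketch wal = new RollingWalSketch(Paths.get("wal-demo"), "demo-wal", 10 * 1024);
            for (int i = 0; i < 3000; i++) {
                wal.append("edit-" + i); // enough entries to force a couple of rolls at 10 KB
            }
        }
    }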
2024-11-21T00:18:58,197 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:18:58,198 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64961394, jitterRate=-0.031999796628952026}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:18:58,199 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:18:58,199 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148338111Writing region info on filesystem at 1732148338111Initializing all the Stores at 1732148338121 (+10 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148338121Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148338132 (+11 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148338132Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148338132Cleaning up temporary data from old regions at 1732148338195 (+63 ms)Running coprocessor post-open hooks at 1732148338199 (+4 ms)Region opened successfully at 1732148338199 2024-11-21T00:18:58,200 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148337980 2024-11-21T00:18:58,206 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:18:58,206 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:18:58,207 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:58,208 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,35605,1732148334460, state=OPEN 2024-11-21T00:18:58,222 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1464671649/meta-region-server 2024-11-21T00:18:58,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1464671649/meta-region-server 2024-11-21T00:18:58,223 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35605,1732148334460 2024-11-21T00:18:58,223 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1464671649/meta-region-server: CHANGED 2024-11-21T00:18:58,223 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1464671649/meta-region-server: CHANGED 2024-11-21T00:18:58,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:18:58,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35605,1732148334460 in 407 msec 2024-11-21T00:18:58,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:18:58,234 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 612 msec 2024-11-21T00:18:58,235 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:18:58,236 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:18:58,238 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:58,238 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35605,1732148334460, seqNum=-1] 2024-11-21T00:18:58,238 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:58,240 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46849, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:58,248 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6330 sec 2024-11-21T00:18:58,248 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148338248, completionTime=-1 
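Annotation (illustrative, not from the test run): the "fetched meta region location" entries above are the registry lookup a client performs before talking to hbase:meta. A minimal Java sketch, assuming a reachable cluster configuration on the classpath, that performs the same lookup through the public API; class and variable names are ours.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                // Equivalent to the "fetched meta region location" lookup logged above.
                HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
                System.out.println("hbase:meta is on " + loc.getServerName());
            }
        }
    }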
2024-11-21T00:18:58,248 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:18:58,249 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:18:58,251 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:18:58,251 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148398251 2024-11-21T00:18:58,251 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148458251 2024-11-21T00:18:58,251 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:18:58,252 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36869,1732148334327-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:58,252 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36869,1732148334327-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:58,252 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36869,1732148334327-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:58,252 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:36869, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:58,252 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:58,254 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:18:58,256 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.709sec 2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
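Annotation (illustrative): the ServerManager entry above shows the master waiting for the expected RegionServer count before finishing initialization. A short sketch, assuming the public Admin API, that checks the same live-server count from a client; names are ours.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class LiveServersSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                // Mirrors "Finished waiting on RegionServer count=1" above: one live server expected.
                System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
                System.out.println("active master: " + metrics.getMasterName());
            }
        }
    }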
2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36869,1732148334327-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:18:58,261 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36869,1732148334327-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:18:58,263 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:18:58,263 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:18:58,263 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,36869,1732148334327-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:18:58,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a8decf5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:58,332 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36869,-1 for getting cluster id 2024-11-21T00:18:58,332 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:18:58,337 DEBUG [HMaster-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '281f17dd-ba1b-4202-86e4-c12531773b29' 2024-11-21T00:18:58,337 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:18:58,337 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "281f17dd-ba1b-4202-86e4-c12531773b29" 2024-11-21T00:18:58,338 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29fddd86, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:58,338 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36869,-1] 2024-11-21T00:18:58,338 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:18:58,339 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:18:58,342 INFO [HMaster-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55700, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:18:58,344 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fec86dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:18:58,344 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:18:58,346 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35605,1732148334460, seqNum=-1] 2024-11-21T00:18:58,346 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:18:58,347 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38932, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:18:58,350 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,36869,1732148334327 2024-11-21T00:18:58,352 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:58140 2024-11-21T00:18:58,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:18:58,376 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015ac1e6590005 connected 2024-11-21T00:18:58,390 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:18:58,394 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34855,1732148327840 2024-11-21T00:18:58,394 DEBUG [Time-limited test {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@25ef4bd2 2024-11-21T00:18:58,395 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:18:58,396 INFO [HMaster-EventLoopGroup-13-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33596, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:18:58,400 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:18:58,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] procedure2.ProcedureExecutor(1139): Stored 
pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:18:58,412 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:18:58,413 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:58,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34855 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:18:58,416 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:18:58,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:58,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:18:58,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:58,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:58,868 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3658886116178f402a76844a19dbf457, NAME => 'test,,1732148338397.3658886116178f402a76844a19dbf457.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da 2024-11-21T00:18:58,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:18:59,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:59,287 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148338397.3658886116178f402a76844a19dbf457.; 
StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:59,287 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing 3658886116178f402a76844a19dbf457, disabling compactions & flushes 2024-11-21T00:18:59,287 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:18:59,288 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:18:59,288 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148338397.3658886116178f402a76844a19dbf457. after waiting 0 ms 2024-11-21T00:18:59,288 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:18:59,288 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:18:59,288 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3658886116178f402a76844a19dbf457: Waiting for close lock at 1732148339287Disabling compacts and flushes for region at 1732148339287Disabling writes for close at 1732148339288 (+1 ms)Writing region close event to WAL at 1732148339288Closed at 1732148339288 2024-11-21T00:18:59,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:18:59,290 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148338397.3658886116178f402a76844a19dbf457.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148339290"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148339290"}]},"ts":"1732148339290"} 2024-11-21T00:18:59,293 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
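Annotation (illustrative, not from the test run): the create request logged by HMaster above declares three families, with 'f' and 'f1' at REPLICATION_SCOPE '1' and 'norep' at '0'. A minimal Java sketch of issuing an equivalent request through the public client API; class and variable names are ours.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"));
                // 'f' and 'f1' are replicated (REPLICATION_SCOPE => '1'), 'norep' is not, as in the log.
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
                    .setMaxVersions(1).setScope(1).build());
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
                    .setMaxVersions(1).setScope(1).build());
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
                    .setMaxVersions(1).setScope(0).build());
                admin.createTable(table.build()); // drives a CreateTableProcedure like the one above
            }
        }
    }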
2024-11-21T00:18:59,295 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:18:59,295 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148339295"}]},"ts":"1732148339295"} 2024-11-21T00:18:59,297 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:18:59,297 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=3658886116178f402a76844a19dbf457, ASSIGN}] 2024-11-21T00:18:59,299 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=3658886116178f402a76844a19dbf457, ASSIGN 2024-11-21T00:18:59,300 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=3658886116178f402a76844a19dbf457, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,42853,1732148328016; forceNewPlan=false, retain=false 2024-11-21T00:18:59,450 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3658886116178f402a76844a19dbf457, regionState=OPENING, regionLocation=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:59,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=3658886116178f402a76844a19dbf457, ASSIGN because future has completed 2024-11-21T00:18:59,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3658886116178f402a76844a19dbf457, server=5ed4808ef0e6,42853,1732148328016}] 2024-11-21T00:18:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:18:59,614 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:18:59,614 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3658886116178f402a76844a19dbf457, NAME => 'test,,1732148338397.3658886116178f402a76844a19dbf457.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:18:59,614 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:18:59,615 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
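Annotation (illustrative): the ASSIGN and OpenRegionProcedure entries above place the single region of 'test' on a region server. A short sketch, assuming the create has completed, that reads back the resulting assignment through the public API; names are ours.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionAssignmentSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = conn.getRegionLocator(TableName.valueOf("test"))) {
                // One location is expected here, matching the single OpenRegionProcedure above.
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
                }
            }
        }
    }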
2024-11-21T00:18:59,615 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,615 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148338397.3658886116178f402a76844a19dbf457.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:18:59,615 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,615 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,616 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,618 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3658886116178f402a76844a19dbf457 columnFamilyName f 2024-11-21T00:18:59,618 DEBUG [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:59,619 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] regionserver.HStore(327): Store=3658886116178f402a76844a19dbf457/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:59,619 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,620 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3658886116178f402a76844a19dbf457 columnFamilyName f1 2024-11-21T00:18:59,621 DEBUG [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:59,621 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] regionserver.HStore(327): Store=3658886116178f402a76844a19dbf457/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:59,621 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,623 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3658886116178f402a76844a19dbf457 columnFamilyName norep 2024-11-21T00:18:59,623 DEBUG [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:18:59,624 INFO [StoreOpener-3658886116178f402a76844a19dbf457-1 {}] regionserver.HStore(327): Store=3658886116178f402a76844a19dbf457/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:18:59,630 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,637 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,638 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,657 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,657 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,661 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:18:59,671 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,688 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:18:59,689 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3658886116178f402a76844a19dbf457; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61562582, jitterRate=-0.08264604210853577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:18:59,689 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3658886116178f402a76844a19dbf457 2024-11-21T00:18:59,690 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3658886116178f402a76844a19dbf457: Running coprocessor pre-open hook at 1732148339615Writing region info on filesystem at 1732148339615Initializing all the Stores at 1732148339616 (+1 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148339616Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148339616Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148339616Cleaning up temporary data from old regions at 1732148339658 (+42 ms)Running coprocessor post-open hooks at 1732148339689 (+31 ms)Region opened successfully at 1732148339690 (+1 ms) 2024-11-21T00:18:59,694 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148338397.3658886116178f402a76844a19dbf457., pid=6, 
masterSystemTime=1732148339610 2024-11-21T00:18:59,697 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:18:59,697 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:18:59,700 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3658886116178f402a76844a19dbf457, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:18:59,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3658886116178f402a76844a19dbf457, server=5ed4808ef0e6,42853,1732148328016 because future has completed 2024-11-21T00:18:59,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:18:59,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3658886116178f402a76844a19dbf457, server=5ed4808ef0e6,42853,1732148328016 in 256 msec 2024-11-21T00:18:59,725 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:18:59,725 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148339725"}]},"ts":"1732148339725"} 2024-11-21T00:18:59,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:18:59,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=3658886116178f402a76844a19dbf457, ASSIGN in 422 msec 2024-11-21T00:18:59,729 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:18:59,731 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:18:59,735 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 1.3290 sec 2024-11-21T00:19:00,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:19:00,568 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:19:00,569 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:19:00,570 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36869,1732148334327 2024-11-21T00:19:00,570 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@75f3d2d9 2024-11-21T00:19:00,571 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:19:00,573 INFO [HMaster-EventLoopGroup-15-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55710, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:19:00,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36869 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:19:00,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36869 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:19:00,578 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:19:00,578 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:00,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36869 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:19:00,580 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:19:00,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36869 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:19:00,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:19:00,636 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => eda309edd44cdafd7e34516d2725f510, NAME => 'test,,1732148340574.eda309edd44cdafd7e34516d2725f510.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE 
=> '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b 2024-11-21T00:19:00,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:19:00,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36869 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:19:00,689 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148340574.eda309edd44cdafd7e34516d2725f510.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:19:00,689 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing eda309edd44cdafd7e34516d2725f510, disabling compactions & flushes 2024-11-21T00:19:00,689 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:19:00,689 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:19:00,689 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148340574.eda309edd44cdafd7e34516d2725f510. after waiting 0 ms 2024-11-21T00:19:00,689 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:19:00,689 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:19:00,689 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for eda309edd44cdafd7e34516d2725f510: Waiting for close lock at 1732148340689Disabling compacts and flushes for region at 1732148340689Disabling writes for close at 1732148340689Writing region close event to WAL at 1732148340689Closed at 1732148340689 2024-11-21T00:19:00,691 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:19:00,691 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148340574.eda309edd44cdafd7e34516d2725f510.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148340691"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148340691"}]},"ts":"1732148340691"} 2024-11-21T00:19:00,697 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
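Annotation (illustrative): the descriptor logged above carries the table attribute 'hbase.store.file-tracker.impl' => 'DEFAULT', which StoreFileTrackerFactory resolves to DefaultStoreFileTracker in the surrounding entries. A small sketch, assuming the public Admin API, that reads that attribute back from the created table; names are ours.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class StoreFileTrackerSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableDescriptor desc = admin.getDescriptor(TableName.valueOf("test"));
                // Expected to print DEFAULT for the descriptor shown in the log above.
                System.out.println(desc.getValue("hbase.store.file-tracker.impl"));
            }
        }
    }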
2024-11-21T00:19:00,698 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:19:00,699 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148340698"}]},"ts":"1732148340698"} 2024-11-21T00:19:00,705 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:19:00,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=eda309edd44cdafd7e34516d2725f510, ASSIGN}] 2024-11-21T00:19:00,708 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=eda309edd44cdafd7e34516d2725f510, ASSIGN 2024-11-21T00:19:00,710 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=eda309edd44cdafd7e34516d2725f510, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,35605,1732148334460; forceNewPlan=false, retain=false 2024-11-21T00:19:00,860 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eda309edd44cdafd7e34516d2725f510, regionState=OPENING, regionLocation=5ed4808ef0e6,35605,1732148334460 2024-11-21T00:19:00,863 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=eda309edd44cdafd7e34516d2725f510, ASSIGN because future has completed 2024-11-21T00:19:00,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure eda309edd44cdafd7e34516d2725f510, server=5ed4808ef0e6,35605,1732148334460}] 2024-11-21T00:19:00,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36869 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:19:01,019 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:19:01,019 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => eda309edd44cdafd7e34516d2725f510, NAME => 'test,,1732148340574.eda309edd44cdafd7e34516d2725f510.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:19:01,020 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:19:01,020 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
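Annotation (illustrative): the CompactionConfiguration entries above print minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, major period 604800000 and major jitter 0.500000. Assuming these correspond to the usual hbase.hstore.compaction.* and hbase.hregion.majorcompaction* keys (our assumption, not stated in the log), a sketch of setting the same values explicitly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        public static Configuration compactionDefaults() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed key names for the values printed by CompactionConfiguration above.
            conf.setInt("hbase.hstore.compaction.min", 3);               // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);              // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);        // ratio
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);   // major period, ms
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f); // major jitter
            return conf;
        }
    }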
2024-11-21T00:19:01,020 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,020 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148340574.eda309edd44cdafd7e34516d2725f510.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:19:01,020 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,020 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,022 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,023 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eda309edd44cdafd7e34516d2725f510 columnFamilyName f 2024-11-21T00:19:01,023 DEBUG [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:01,023 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] regionserver.HStore(327): Store=eda309edd44cdafd7e34516d2725f510/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:19:01,023 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,024 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eda309edd44cdafd7e34516d2725f510 columnFamilyName f1 2024-11-21T00:19:01,024 DEBUG [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:01,025 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] regionserver.HStore(327): Store=eda309edd44cdafd7e34516d2725f510/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:19:01,025 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,025 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eda309edd44cdafd7e34516d2725f510 columnFamilyName norep 2024-11-21T00:19:01,025 DEBUG [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:01,026 INFO [StoreOpener-eda309edd44cdafd7e34516d2725f510-1 {}] regionserver.HStore(327): Store=eda309edd44cdafd7e34516d2725f510/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:19:01,026 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,026 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,027 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,028 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,028 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,028 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:19:01,029 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,031 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:19:01,032 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened eda309edd44cdafd7e34516d2725f510; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73805270, jitterRate=0.09978422522544861}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:19:01,032 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:19:01,032 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for eda309edd44cdafd7e34516d2725f510: Running coprocessor pre-open hook at 1732148341020Writing region info on filesystem at 1732148341020Initializing all the Stores at 1732148341021 (+1 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148341021Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148341021Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148341021Cleaning up temporary data from old regions at 1732148341028 (+7 ms)Running coprocessor post-open hooks at 1732148341032 (+4 ms)Region opened successfully at 1732148341032 2024-11-21T00:19:01,033 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148340574.eda309edd44cdafd7e34516d2725f510., pid=6, 
masterSystemTime=1732148341015 2024-11-21T00:19:01,035 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:19:01,035 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:19:01,035 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=eda309edd44cdafd7e34516d2725f510, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,35605,1732148334460 2024-11-21T00:19:01,038 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure eda309edd44cdafd7e34516d2725f510, server=5ed4808ef0e6,35605,1732148334460 because future has completed 2024-11-21T00:19:01,042 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:19:01,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure eda309edd44cdafd7e34516d2725f510, server=5ed4808ef0e6,35605,1732148334460 in 177 msec 2024-11-21T00:19:01,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:19:01,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=eda309edd44cdafd7e34516d2725f510, ASSIGN in 337 msec 2024-11-21T00:19:01,047 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:19:01,047 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148341047"}]},"ts":"1732148341047"} 2024-11-21T00:19:01,049 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:19:01,050 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:19:01,052 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 476 msec 2024-11-21T00:19:01,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36869 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:19:01,207 INFO [RPCClient-NioEventLoopGroup-4-5 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:19:01,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58f93e94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,208 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34855,-1 for getting cluster 
id 2024-11-21T00:19:01,208 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:01,209 DEBUG [HMaster-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a26cfe3b-fea0-49aa-8533-f1a70be2e18f' 2024-11-21T00:19:01,210 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:01,210 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a26cfe3b-fea0-49aa-8533-f1a70be2e18f" 2024-11-21T00:19:01,210 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49fc1385, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,210 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34855,-1] 2024-11-21T00:19:01,210 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:19:01,211 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:01,212 INFO [HMaster-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33616, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:01,213 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c3c861d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14f0cd7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,214 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36869,-1 for getting cluster id 2024-11-21T00:19:01,214 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:01,215 DEBUG [HMaster-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '281f17dd-ba1b-4202-86e4-c12531773b29' 2024-11-21T00:19:01,215 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:01,215 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "281f17dd-ba1b-4202-86e4-c12531773b29" 2024-11-21T00:19:01,215 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab56ef8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,215 DEBUG 
[RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36869,-1] 2024-11-21T00:19:01,216 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:19:01,216 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:01,217 INFO [HMaster-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55730, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:01,218 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e7da2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,218 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22ab1c30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,219 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34855,-1 for getting cluster id 2024-11-21T00:19:01,219 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:01,220 DEBUG [HMaster-EventLoopGroup-13-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a26cfe3b-fea0-49aa-8533-f1a70be2e18f' 2024-11-21T00:19:01,220 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:01,220 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a26cfe3b-fea0-49aa-8533-f1a70be2e18f" 2024-11-21T00:19:01,220 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e94f39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,220 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34855,-1] 2024-11-21T00:19:01,221 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:19:01,221 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:01,222 INFO [HMaster-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33644, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:01,222 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc084d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,224 DEBUG [Time-limited test {}] 
client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:19:01,226 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34855,1732148327840 2024-11-21T00:19:01,226 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5cb11289 2024-11-21T00:19:01,226 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:19:01,227 INFO [HMaster-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33652, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:19:01,228 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:36869,replicationEndpointImpl=null,replicateAllUserTables=false,tableCFs={test=[f]},bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:19:01,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:19:01,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:19:01,231 DEBUG [PEWorker-2 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36869' 2024-11-21T00:19:01,232 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10c76d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,232 DEBUG [PEWorker-2 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36869,-1 for getting cluster id 2024-11-21T00:19:01,232 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:01,233 DEBUG [HMaster-EventLoopGroup-15-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '281f17dd-ba1b-4202-86e4-c12531773b29' 2024-11-21T00:19:01,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:01,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "281f17dd-ba1b-4202-86e4-c12531773b29" 2024-11-21T00:19:01,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a7bf7a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36869,-1] 2024-11-21T00:19:01,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:19:01,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:01,237 INFO [HMaster-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55750, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:01,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@496e62ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:01,238 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:19:01,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36869,1732148334327 2024-11-21T00:19:01,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@73aa7d8c 2024-11-21T00:19:01,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:19:01,240 INFO [HMaster-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55764, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:19:01,241 INFO [PEWorker-2 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-2. 
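The records above show the master accepting the addReplicationPeer request (clusterKey=hbase+rpc://5ed4808ef0e6:36869, replicateAllUserTables=false, tableCFs={test=[f]}), storing AddPeerProcedure pid=7, and ReplicationPeerManager.checkClusterKey opening and closing a short-lived connection to the peer cluster. The sketch below is a minimal, hypothetical reconstruction of the client call that produces this log traffic, using the public Admin and ReplicationPeerConfig builder API; it is not the test's actual code, and the builder/URI details should be checked against the exact HBase release (this log is 3.0.0-beta-2-SNAPSHOT).

```java
// Hypothetical sketch: add a replication peer matching the config printed above.
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Replicate only family 'f' of table 'test', matching tableCFs={test=[f]} in the log.
      Map<TableName, List<String>> tableCfs =
          Collections.singletonMap(TableName.valueOf("test"), Collections.singletonList("f"));
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          // The hbase+rpc:// bootstrap URI is copied from the logged peer config;
          // older releases use a ZooKeeper-style cluster key here instead.
          .setClusterKey("hbase+rpc://5ed4808ef0e6:36869")
          .setReplicateAllUserTables(false)
          .setTableCFsMap(tableCfs)
          .build();
      // Two-argument form enables the peer, matching state=ENABLED in the log.
      admin.addReplicationPeer("1", peerConfig);
    }
  }
}
```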
2024-11-21T00:19:01,241 DEBUG [PEWorker-2 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172)
    at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118)
    at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188)
    at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45)
    at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188)
    at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181)
2024-11-21T00:19:01,241 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T00:19:01,241 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T00:19:01,242 INFO [PEWorker-2 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:19:01,243 INFO [Registry-endpoints-refresh-end-points {}]
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:19:01,243 DEBUG [PEWorker-2 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:19:01,245 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:19:01,245 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:01,247 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:19:01,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:19:01,293 DEBUG [PEWorker-2 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:19:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:19:01,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:19:01,658 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d2edabe9f0a5eaaf9af81acd5798eca0, NAME => 'hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da 2024-11-21T00:19:01,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:19:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=7 2024-11-21T00:19:02,085 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:19:02,085 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing d2edabe9f0a5eaaf9af81acd5798eca0, disabling compactions & flushes 2024-11-21T00:19:02,085 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:19:02,085 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:19:02,085 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. after waiting 0 ms 2024-11-21T00:19:02,085 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:19:02,085 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:19:02,085 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for d2edabe9f0a5eaaf9af81acd5798eca0: Waiting for close lock at 1732148342085Disabling compacts and flushes for region at 1732148342085Disabling writes for close at 1732148342085Writing region close event to WAL at 1732148342085Closed at 1732148342085 2024-11-21T00:19:02,089 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:19:02,089 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148342089"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148342089"}]},"ts":"1732148342089"} 2024-11-21T00:19:02,096 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
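The table descriptor printed above for 'hbase:replication' (MultiRowMutationEndpoint coprocessor, DelimitedKeyPrefix split restriction, and the hfileref/queue/sid families) is assembled internally by the master's ReplicationPeerManager. Purely for illustration, the sketch below shows an equivalent descriptor built with the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; it is not the master's source code, and the attribute keys are taken verbatim from the logged descriptor.

```java
// Illustrative reconstruction of the logged 'hbase:replication' descriptor.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplicationTableDescriptorSketch {
  static TableDescriptor build() throws Exception {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "replication"))
        // Coprocessor and split-restriction metadata as shown in the log.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setValue("hbase.regionserver.region.split_restriction.type", "DelimitedKeyPrefix")
        .setValue("hbase.regionserver.region.split_restriction.delimiter", "-")
        // Three single-version families; other attributes keep their defaults.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("hfileref")).setMaxVersions(1).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("queue")).setMaxVersions(1).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("sid")).setMaxVersions(1).build())
        .build();
  }
}
```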
2024-11-21T00:19:02,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:19:02,105 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148342104"}]},"ts":"1732148342104"} 2024-11-21T00:19:02,110 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:19:02,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d2edabe9f0a5eaaf9af81acd5798eca0, ASSIGN}] 2024-11-21T00:19:02,113 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d2edabe9f0a5eaaf9af81acd5798eca0, ASSIGN 2024-11-21T00:19:02,115 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=d2edabe9f0a5eaaf9af81acd5798eca0, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,42853,1732148328016; forceNewPlan=false, retain=false 2024-11-21T00:19:02,266 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d2edabe9f0a5eaaf9af81acd5798eca0, regionState=OPENING, regionLocation=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:19:02,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=d2edabe9f0a5eaaf9af81acd5798eca0, ASSIGN because future has completed 2024-11-21T00:19:02,278 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2edabe9f0a5eaaf9af81acd5798eca0, server=5ed4808ef0e6,42853,1732148328016}] 2024-11-21T00:19:02,332 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:19:02,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:19:02,464 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 
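While the assignment above runs, the RPC handler keeps answering "Checking to see if procedure is done" polls until every region of the new table is open. The snippet below is a hedged sketch of what an equivalent client-side wait could look like, polling Admin#isTableAvailable; the timeout and poll interval are arbitrary illustration values, not anything taken from this test.

```java
// Hypothetical sketch: block until all regions of a table are assigned and open.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class WaitForTableSketch {
  static void waitUntilAvailable(Admin admin, TableName table, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // isTableAvailable returns true once every region of the table is open.
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Table " + table + " not available within " + timeoutMs + " ms");
      }
      Thread.sleep(200); // back off briefly between checks
    }
  }
}
```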
2024-11-21T00:19:02,464 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:19:02,465 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:19:02,469 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C42853%2C1732148328016.rep, suffix=, logDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016, archiveDir=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/oldWALs, maxLogs=10 2024-11-21T00:19:02,547 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.rep.1732148342469, exclude list is [], retry=0 2024-11-21T00:19:02,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38023,DS-7801d401-6f3f-4630-a52f-d30dd62f106d,DISK] 2024-11-21T00:19:02,561 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.rep.1732148342469 2024-11-21T00:19:02,562 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45785:45785)] 2024-11-21T00:19:02,562 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => d2edabe9f0a5eaaf9af81acd5798eca0, NAME => 'hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:19:02,562 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:19:02,563 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:19:02,563 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 
service=MultiRowMutationService 2024-11-21T00:19:02,563 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:19:02,563 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,563 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:19:02,563 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,563 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,570 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,573 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2edabe9f0a5eaaf9af81acd5798eca0 columnFamilyName hfileref 2024-11-21T00:19:02,573 DEBUG [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:02,574 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] regionserver.HStore(327): Store=d2edabe9f0a5eaaf9af81acd5798eca0/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:19:02,574 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,576 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 
{}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2edabe9f0a5eaaf9af81acd5798eca0 columnFamilyName queue 2024-11-21T00:19:02,577 DEBUG [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:02,578 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] regionserver.HStore(327): Store=d2edabe9f0a5eaaf9af81acd5798eca0/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:19:02,579 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,581 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2edabe9f0a5eaaf9af81acd5798eca0 columnFamilyName sid 2024-11-21T00:19:02,581 DEBUG [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:19:02,582 INFO [StoreOpener-d2edabe9f0a5eaaf9af81acd5798eca0-1 {}] regionserver.HStore(327): Store=d2edabe9f0a5eaaf9af81acd5798eca0/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:19:02,582 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,585 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,585 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,589 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,589 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,590 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:19:02,592 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,595 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:19:02,595 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened d2edabe9f0a5eaaf9af81acd5798eca0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72887605, jitterRate=0.0861099511384964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:19:02,596 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:19:02,597 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for d2edabe9f0a5eaaf9af81acd5798eca0: Running coprocessor pre-open hook at 1732148342563Writing region info on filesystem at 1732148342563Initializing all the Stores at 1732148342567 (+4 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148342567Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148342568 (+1 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148342569 (+1 ms)Cleaning up temporary data from old regions at 1732148342589 (+20 ms)Running coprocessor post-open hooks at 1732148342596 (+7 ms)Region opened successfully at 1732148342596 2024-11-21T00:19:02,600 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0., pid=10, masterSystemTime=1732148342436 2024-11-21T00:19:02,606 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:19:02,606 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:19:02,607 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d2edabe9f0a5eaaf9af81acd5798eca0, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,42853,1732148328016 2024-11-21T00:19:02,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2edabe9f0a5eaaf9af81acd5798eca0, server=5ed4808ef0e6,42853,1732148328016 because future has completed 2024-11-21T00:19:02,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:19:02,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure d2edabe9f0a5eaaf9af81acd5798eca0, server=5ed4808ef0e6,42853,1732148328016 in 334 msec 2024-11-21T00:19:02,628 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:19:02,629 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d2edabe9f0a5eaaf9af81acd5798eca0, ASSIGN in 512 msec 2024-11-21T00:19:02,631 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:19:02,632 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148342631"}]},"ts":"1732148342631"} 2024-11-21T00:19:02,638 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:19:02,643 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:19:02,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; 
CreateTableProcedure table=hbase:replication in 1.4010 sec 2024-11-21T00:19:02,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2] 2024-11-21T00:19:02,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:19:02,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:19:02,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:19:02,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42853 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:19:02,932 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:19:02,983 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,42853,1732148328016, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:19:02,984 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:19:02,984 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42853,1732148328016, seqNum=-1] 2024-11-21T00:19:02,984 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:19:02,986 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52471, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=ClientService 2024-11-21T00:19:02,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,42853,1732148328016', locateType=CURRENT is [region=hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2] 2024-11-21T00:19:03,028 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:19:03,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:19:03,040 INFO [PEWorker-5 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 
5ed4808ef0e6,42853,1732148328016 suceeded 2024-11-21T00:19:03,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:19:03,046 INFO [PEWorker-1 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:36869,replicationEndpointImpl=null,replicateAllUserTables=false,tableCFs={test=[f]},bandwidth=0,serial=false 2024-11-21T00:19:03,046 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 265 msec 2024-11-21T00:19:03,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.8180 sec 2024-11-21T00:19:03,063 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:36869' 2024-11-21T00:19:03,092 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@639af9ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:03,092 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36869,-1 for getting cluster id 2024-11-21T00:19:03,092 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:03,097 DEBUG [HMaster-EventLoopGroup-15-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '281f17dd-ba1b-4202-86e4-c12531773b29' 2024-11-21T00:19:03,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:03,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "281f17dd-ba1b-4202-86e4-c12531773b29" 2024-11-21T00:19:03,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@72721a3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:03,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36869,-1] 2024-11-21T00:19:03,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:19:03,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:03,105 INFO [HMaster-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53442, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:03,108 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@43efaa2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:03,108 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:19:03,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36869,1732148334327 2024-11-21T00:19:03,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@451af59b 2024-11-21T00:19:03,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:19:03,112 INFO [HMaster-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53454, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=MasterService 2024-11-21T00:19:03,114 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,42853,1732148328016 (queues=1) is replicating from cluster=a26cfe3b-fea0-49aa-8533-f1a70be2e18f to cluster=281f17dd-ba1b-4202-86e4-c12531773b29 2024-11-21T00:19:03,115 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C42853%2C1732148328016 2024-11-21T00:19:03,116 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,42853,1732148328016, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:19:03,125 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466, startPosition=0, beingWritten=true 2024-11-21T00:19:03,144 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C42853%2C1732148328016 2024-11-21T00:19:03,214 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:19:03,219 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 379, reset compression=false 2024-11-21T00:19:03,215 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,42853,1732148328016 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:19:03,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34855 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:19:03,398 INFO [RPCClient-NioEventLoopGroup-4-12 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:19:03,400 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
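At this point the log reports "Operation: ADD_REPLICATION_PEER, peerId: 1 completed" and the replication source's WAL reader/shipper are idling at position 379 with nothing to ship yet. A small, hypothetical verification step using the public Admin replication API is sketched below; the output formatting is illustrative only.

```java
// Sketch: confirm the peer exists and inspect its stored configuration.
import java.util.List;

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListPeersSketch {
  static void dumpPeers(Admin admin) throws Exception {
    List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
    for (ReplicationPeerDescription peer : peers) {
      // For this log we would expect peerId=1, enabled=true, tableCFs={test=[f]}.
      System.out.println("peerId=" + peer.getPeerId()
          + " enabled=" + peer.isEnabled()
          + " tableCFs=" + peer.getPeerConfig().getTableCFsMap());
    }
  }
}
```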
2024-11-21T00:19:03,400 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:632) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileReplicationForConfiguredTableCfs(TestMasterReplication.java:365) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:19:03,400 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:03,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:03,401 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:19:03,404 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
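Later in this log the shipper picks up an HBASE::BULK_LOAD marker from the WAL and the sink reports "Replicating ... bulk loaded data", behaviour tied to HFile (bulk load) replication being enabled on the clusters. A minimal configuration sketch, assuming the standard hbase.replication.bulkload.enabled and hbase.replication.cluster.id keys and a placeholder cluster id (the test's actual values are not shown in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BulkLoadReplicationConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Ship bulk-load (HFile) events through replication, not just WAL edits.
    conf.setBoolean("hbase.replication.bulkload.enabled", true);
    // A cluster id lets the sink fetch the HFiles back from the source
    // cluster's file system; the value here is a placeholder.
    conf.set("hbase.replication.cluster.id", "source-cluster");
    return conf;
  }
}
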
2024-11-21T00:19:03,422 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_0 is 36, key is aaaa/f:row/1732148343420/Put/seqid=0 2024-11-21T00:19:03,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741840_1016 (size=7894) 2024-11-21T00:19:03,452 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 379, reset compression=false 2024-11-21T00:19:03,614 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:19:03,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:19:03,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:19:03,657 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:19:03,776 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 379, reset compression=false 2024-11-21T00:19:03,846 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_1 is 34, key is ddd/f:row/1732148343845/Put/seqid=0 2024-11-21T00:19:03,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741841_1017 (size=7691) 2024-11-21T00:19:04,204 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 379, reset compression=false 2024-11-21T00:19:04,257 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ea9df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:04,257 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 
5ed4808ef0e6,34855,-1 for getting cluster id 2024-11-21T00:19:04,258 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:04,259 DEBUG [HMaster-EventLoopGroup-13-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a26cfe3b-fea0-49aa-8533-f1a70be2e18f' 2024-11-21T00:19:04,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:04,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a26cfe3b-fea0-49aa-8533-f1a70be2e18f" 2024-11-21T00:19:04,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21ff9b25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:04,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34855,-1] 2024-11-21T00:19:04,260 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:19:04,260 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:04,262 INFO [HMaster-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55632, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:04,263 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f63b8b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:04,263 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:19:04,267 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42853,1732148328016, seqNum=-1] 2024-11-21T00:19:04,268 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:19:04,269 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33638, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:19:04,285 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:19:04,287 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34855,1732148327840 2024-11-21T00:19:04,287 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4e05f8d3 2024-11-21T00:19:04,291 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:19:04,292 INFO [HMaster-EventLoopGroup-13-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55648, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:19:04,304 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2] 2024-11-21T00:19:04,322 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:19:04,351 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_0 first=Optional[aaaa] last=Optional[cccc] 2024-11-21T00:19:04,358 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_1 first=Optional[ddd] last=Optional[fff] 2024-11-21T00:19:04,383 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_0 for inclusion in 3658886116178f402a76844a19dbf457/f 2024-11-21T00:19:04,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(626): HFile bounds: first=aaaa last=cccc 2024-11-21T00:19:04,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:19:04,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_1 for inclusion in 3658886116178f402a76844a19dbf457/f 2024-11-21T00:19:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(626): HFile bounds: first=ddd last=fff 2024-11-21T00:19:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:19:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HRegion(2603): Flush status journal for 3658886116178f402a76844a19dbf457: 2024-11-21T00:19:04,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_0 to hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_0 2024-11-21T00:19:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_0 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ 2024-11-21T00:19:04,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] 
regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f/f/hfile_1 to hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_1 2024-11-21T00:19:04,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_1 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/6ced9b416f23498790be20af4137552c_SeqId_4_ 2024-11-21T00:19:04,440 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_0 into 3658886116178f402a76844a19dbf457/f as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ - updating store file list. 2024-11-21T00:19:04,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:19:04,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ into 3658886116178f402a76844a19dbf457/f 2024-11-21T00:19:04,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_0 into 3658886116178f402a76844a19dbf457/f (new location: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_) 2024-11-21T00:19:04,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_0 2024-11-21T00:19:04,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_1 into 3658886116178f402a76844a19dbf457/f as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/6ced9b416f23498790be20af4137552c_SeqId_4_ - updating store file list. 
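The BulkLoadHFilesTool "Trying to load hfile=..." lines above, and the call stack a little further down showing BulkLoadHFilesTool.bulkLoad invoked from loadAndValidateHFileReplication, correspond to a client-side bulk load of the two HFiles written under load_f. A minimal sketch of that call through the public BulkLoadHFiles interface, with a placeholder path standing in for the test-data directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Expected layout: <dir>/<family>/<hfile>, e.g. .../load_f/f/hfile_0
    Path dir = new Path("hdfs://namenode:port/path/to/load_f"); // placeholder path
    BulkLoadHFiles.create(conf).bulkLoad(TableName.valueOf("test"), dir);
  }
}
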
2024-11-21T00:19:04,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 6ced9b416f23498790be20af4137552c_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:19:04,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/6ced9b416f23498790be20af4137552c_SeqId_4_ into 3658886116178f402a76844a19dbf457/f 2024-11-21T00:19:04,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_1 into 3658886116178f402a76844a19dbf457/f (new location: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f/6ced9b416f23498790be20af4137552c_SeqId_4_) 2024-11-21T00:19:04,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__doptd9khs1chfp0h0io3t147mr3mufepdeu5h1vgqtvu4n5ir4nd9u374q9hbcvl/f/hfile_1 2024-11-21T00:19:04,545 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:19:04,545 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.replication.TestMasterReplication.loadAndValidateHFileReplication(TestMasterReplication.java:720) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileReplicationForConfiguredTableCfs(TestMasterReplication.java:375) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) 
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:19:04,545 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:04,545 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:04,545 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:19:04,548 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.AsyncRegionLocatorHelper(64): Try updating region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2 , the old value is region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=5ed4808ef0e6:42853 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:19:04,549 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:19:04,549 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.AsyncRegionLocatorHelper(88): Try removing region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2 from cache 2024-11-21T00:19:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 264 connection: 172.17.0.2:33638 deadline: 1732148404550 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:33638 2024-11-21T00:19:04,557 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:19:04,561 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35605,1732148334460, seqNum=-1] 2024-11-21T00:19:04,562 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:19:04,565 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43072, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:19:04,568 
DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148340574.eda309edd44cdafd7e34516d2725f510., hostname=5ed4808ef0e6,35605,1732148334460, seqNum=2] 2024-11-21T00:19:04,577 INFO [Time-limited test {}] replication.TestMasterReplication(739): Waiting more time for bulkloaded data replication. 2024-11-21T00:19:04,660 WARN [Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'test', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:42853 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-11-21T00:19:04,710 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 379, reset compression=false 2024-11-21T00:19:04,726 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:19:04,726 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,42853,1732148328016 got entry batch from reader: WALEntryBatch [walEntries=[{test/3658886116178f402a76844a19dbf457/5=[#edits: 1 = <\x00/METAFAMILY:HBASE::BULK_LOAD/1732148344527/Put/vlen=190/seqid=0; >],8098}], lastWalPath=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466, lastWalPosition=687, nbRowKeys=1, nbHFiles=2, heapSize=8098, lastSeqIds={}, endOfFile=false,usedBufferSize=407] 2024-11-21T00:19:04,767 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:19:04,775 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43074, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=AdminService 2024-11-21T00:19:04,777 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.ReplicationSink(318): Replicating [a26cfe3b-fea0-49aa-8533-f1a70be2e18f] bulk loaded data 2024-11-21T00:19:04,788 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@59ebfc70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:04,789 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,36869,-1 for getting cluster id 2024-11-21T00:19:04,789 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:04,790 DEBUG [HMaster-EventLoopGroup-15-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '281f17dd-ba1b-4202-86e4-c12531773b29' 2024-11-21T00:19:04,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:04,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "281f17dd-ba1b-4202-86e4-c12531773b29" 2024-11-21T00:19:04,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@5750b110, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:04,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,36869,-1] 2024-11-21T00:19:04,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:19:04,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:04,798 INFO [HMaster-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53466, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:04,805 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@f9903fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:04,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741837_1013 (size=7894) 2024-11-21T00:19:04,926 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 687, reset compression=false 2024-11-21T00:19:05,245 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 687, reset compression=false 2024-11-21T00:19:05,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741838_1014 (size=7691) 2024-11-21T00:19:05,581 INFO [Time-limited test {}] replication.TestMasterReplication(739): Waiting more time for bulkloaded data replication. 2024-11-21T00:19:05,681 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 687, reset compression=false 2024-11-21T00:19:05,755 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:19:05,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,36869,1732148334327 2024-11-21T00:19:05,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@49a381cf 2024-11-21T00:19:05,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:19:05,758 INFO [HMaster-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53478, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=MasterService 2024-11-21T00:19:05,760 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:19:05,761 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:19:05,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35605,1732148334460, seqNum=-1] 2024-11-21T00:19:05,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:19:05,765 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43078, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=ClientService 2024-11-21T00:19:05,790 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ first=Optional[aaaa] last=Optional[cccc] 2024-11-21T00:19:05,800 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load 
hfile=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/6ced9b416f23498790be20af4137552c_SeqId_4_ first=Optional[ddd] last=Optional[fff] 2024-11-21T00:19:05,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ for inclusion in eda309edd44cdafd7e34516d2725f510/f 2024-11-21T00:19:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(626): HFile bounds: first=aaaa last=cccc 2024-11-21T00:19:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:19:05,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/6ced9b416f23498790be20af4137552c_SeqId_4_ for inclusion in eda309edd44cdafd7e34516d2725f510/f 2024-11-21T00:19:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(626): HFile bounds: first=ddd last=fff 2024-11-21T00:19:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:19:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HRegion(2603): Flush status journal for eda309edd44cdafd7e34516d2725f510: 2024-11-21T00:19:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:19:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/acb6e1177c9b46c7bf35346847eb91eb_SeqId_4_ 2024-11-21T00:19:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 6ced9b416f23498790be20af4137552c_SeqId_4_ is already available in staging directory. Skipping copy or rename. 
2024-11-21T00:19:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/6ced9b416f23498790be20af4137552c_SeqId_4_ as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/523a98f2c2104d4ca1154edad70b35cc_SeqId_4_ 2024-11-21T00:19:05,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ into eda309edd44cdafd7e34516d2725f510/f as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/acb6e1177c9b46c7bf35346847eb91eb_SeqId_4_ - updating store file list. 2024-11-21T00:19:05,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStoreFile(483): HFile Bloom filter type for acb6e1177c9b46c7bf35346847eb91eb_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:19:05,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/acb6e1177c9b46c7bf35346847eb91eb_SeqId_4_ into eda309edd44cdafd7e34516d2725f510/f 2024-11-21T00:19:05,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ into eda309edd44cdafd7e34516d2725f510/f (new location: hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/acb6e1177c9b46c7bf35346847eb91eb_SeqId_4_) 2024-11-21T00:19:05,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_ 2024-11-21T00:19:05,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/6ced9b416f23498790be20af4137552c_SeqId_4_ into eda309edd44cdafd7e34516d2725f510/f as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/523a98f2c2104d4ca1154edad70b35cc_SeqId_4_ - updating store file list. 
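Because replication is asynchronous, the test keeps logging "Waiting more time for bulkloaded data replication" (above) while it polls the peer for the bulk-loaded rows. A hedged sketch of that kind of positive check against the peer cluster, assuming row 'aaaa' (the first key of hfile_0 per the log) and a 60-second deadline chosen here purely for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifyReplicatedBulkLoad {
  public static void main(String[] args) throws Exception {
    Configuration peerConf = HBaseConfiguration.create(); // peer (sink) cluster configuration
    try (Connection conn = ConnectionFactory.createConnection(peerConf);
         Table table = conn.getTable(TableName.valueOf("test"))) {
      Get firstRow = new Get(Bytes.toBytes("aaaa")).addFamily(Bytes.toBytes("f"));
      long deadline = System.currentTimeMillis() + 60_000L; // illustrative timeout
      // Poll until the bulk-loaded row appears on the peer or the deadline passes.
      while (!table.exists(firstRow) && System.currentTimeMillis() < deadline) {
        Thread.sleep(1_000L);
      }
      System.out.println("row aaaa present on peer: " + table.exists(firstRow));
    }
  }
}
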
2024-11-21T00:19:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 523a98f2c2104d4ca1154edad70b35cc_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:19:05,840 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/523a98f2c2104d4ca1154edad70b35cc_SeqId_4_ into eda309edd44cdafd7e34516d2725f510/f 2024-11-21T00:19:05,840 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/6ced9b416f23498790be20af4137552c_SeqId_4_ into eda309edd44cdafd7e34516d2725f510/f (new location: hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/f/523a98f2c2104d4ca1154edad70b35cc_SeqId_4_) 2024-11-21T00:19:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/staging/jenkins.hfs.6__test__cubaauu4iof1mksg37fe7b5kcm23socddlluting38f3tp8roi9prdoub75a2o50/f/6ced9b416f23498790be20af4137552c_SeqId_4_ 2024-11-21T00:19:05,843 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35605 {}] regionserver.ReplicationSink(324): Finished replicating [a26cfe3b-fea0-49aa-8533-f1a70be2e18f] bulk loaded data 2024-11-21T00:19:06,193 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 687, reset compression=false 2024-11-21T00:19:06,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_test 2024-11-21T00:19:06,308 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_test Metrics about Tables on a single HBase RegionServer 2024-11-21T00:19:06,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_replication 2024-11-21T00:19:06,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_replication Metrics about Tables on a single HBase RegionServer 2024-11-21T00:19:06,310 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:19:06,310 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter Metrics about HBase RegionObservers 2024-11-21T00:19:06,605 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:19:06,612 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_0 is 37, key is gggg/f1:row/1732148346611/Put/seqid=0 2024-11-21T00:19:06,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741842_1018 (size=7997) 2024-11-21T00:19:06,801 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 687, reset compression=false 2024-11-21T00:19:07,023 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_1 is 35, key is jjj/f1:row/1732148347022/Put/seqid=0 2024-11-21T00:19:07,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741843_1019 (size=7794) 2024-11-21T00:19:07,433 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@578013e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:07,434 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34855,-1 for getting cluster id 2024-11-21T00:19:07,434 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:19:07,435 DEBUG [HMaster-EventLoopGroup-13-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a26cfe3b-fea0-49aa-8533-f1a70be2e18f' 2024-11-21T00:19:07,436 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:19:07,436 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a26cfe3b-fea0-49aa-8533-f1a70be2e18f" 2024-11-21T00:19:07,436 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@287b2d00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:07,436 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34855,-1] 2024-11-21T00:19:07,437 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, 
sasl=false 2024-11-21T00:19:07,437 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:07,438 INFO [HMaster-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55670, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:19:07,441 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e2a5337, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:19:07,441 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:19:07,443 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42853,1732148328016, seqNum=-1] 2024-11-21T00:19:07,444 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:19:07,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33640, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:19:07,463 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:19:07,465 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34855,1732148327840 2024-11-21T00:19:07,465 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@a8de6cc 2024-11-21T00:19:07,465 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:19:07,466 INFO [HMaster-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55686, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:19:07,470 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2] 2024-11-21T00:19:07,473 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:19:07,485 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_0 first=Optional[gggg] last=Optional[iiii] 2024-11-21T00:19:07,492 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_1 first=Optional[jjj] last=Optional[lll] 2024-11-21T00:19:07,506 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 687, reset compression=false 2024-11-21T00:19:07,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_0 for inclusion in 3658886116178f402a76844a19dbf457/f1 2024-11-21T00:19:07,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(626): HFile bounds: first=gggg last=iiii 2024-11-21T00:19:07,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:19:07,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_1 for inclusion in 3658886116178f402a76844a19dbf457/f1 2024-11-21T00:19:07,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(626): HFile bounds: first=jjj last=lll 2024-11-21T00:19:07,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:19:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HRegion(2603): Flush status journal for 3658886116178f402a76844a19dbf457: 2024-11-21T00:19:07,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_0 to hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_0 2024-11-21T00:19:07,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_0 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/03c7561a86ad4e0e9cb9abc04008049b_SeqId_6_ 2024-11-21T00:19:07,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/load_f1/f1/hfile_1 to hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_1 2024-11-21T00:19:07,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_1 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/72797544172a4c5eb250b6c0b30316fc_SeqId_6_ 2024-11-21T00:19:07,552 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.ReplicationSource(279): HFiles will not be replicated belonging to the table test family f1 to peer id 1 2024-11-21T00:19:07,554 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_0 into 3658886116178f402a76844a19dbf457/f1 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/03c7561a86ad4e0e9cb9abc04008049b_SeqId_6_ - updating store file list. 2024-11-21T00:19:07,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 03c7561a86ad4e0e9cb9abc04008049b_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:19:07,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/03c7561a86ad4e0e9cb9abc04008049b_SeqId_6_ into 3658886116178f402a76844a19dbf457/f1 2024-11-21T00:19:07,564 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_0 into 3658886116178f402a76844a19dbf457/f1 (new location: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/03c7561a86ad4e0e9cb9abc04008049b_SeqId_6_) 2024-11-21T00:19:07,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_0 2024-11-21T00:19:07,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_1 into 3658886116178f402a76844a19dbf457/f1 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/72797544172a4c5eb250b6c0b30316fc_SeqId_6_ - updating store file list. 
2024-11-21T00:19:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 72797544172a4c5eb250b6c0b30316fc_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:19:07,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/72797544172a4c5eb250b6c0b30316fc_SeqId_6_ into 3658886116178f402a76844a19dbf457/f1 2024-11-21T00:19:07,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_1 into 3658886116178f402a76844a19dbf457/f1 (new location: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/f1/72797544172a4c5eb250b6c0b30316fc_SeqId_6_) 2024-11-21T00:19:07,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/staging/jenkins__test__kjnjt97nc4dqej3g7kcr1eclt3vgo3h24hruh5lsdtjugr5feg3t3fj1erq1kphu/f1/hfile_1 2024-11-21T00:19:07,577 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:19:07,577 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.replication.TestMasterReplication.loadAndValidateHFileReplication(TestMasterReplication.java:720) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileReplicationForConfiguredTableCfs(TestMasterReplication.java:386) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at 
org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:19:07,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:07,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:19:07,578 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:19:07,578 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncRegionLocatorHelper(64): Try updating region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2 , the old value is region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=5ed4808ef0e6:42853 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:19:07,578 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:19:07,578 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncRegionLocatorHelper(88): Try removing region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2 from cache 2024-11-21T00:19:07,579 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:19:07,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42853: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 264 connection: 172.17.0.2:33640 deadline: 1732148407578 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:33640 2024-11-21T00:19:07,580 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,42853,1732148328016, seqNum=-1] 2024-11-21T00:19:07,580 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:19:07,582 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33642, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-11-21T00:19:07,585 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148338397.3658886116178f402a76844a19dbf457., hostname=5ed4808ef0e6,42853,1732148328016, seqNum=2] 2024-11-21T00:19:07,688 WARN [Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'test', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:42853 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-11-21T00:19:08,326 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 687, reset compression=false 2024-11-21T00:19:08,341 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:19:08,341 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:08,341 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,42853,1732148328016 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466, lastWalPosition=997, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:19:08,551 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:08,857 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:09,276 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:09,790 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:10,403 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:11,116 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:11,925 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:12,830 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:13,834 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:14,944 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:16,157 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:17,425 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:19:17,463 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:18,869 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:20,387 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:21,997 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:23,701 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:23,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:19:25,508 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:27,446 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:28,351 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,42853,1732148328016 got entry batch from reader: null 2024-11-21T00:19:29,450 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:31,554 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:31,599 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 
2024-11-21T00:19:31,599 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-21T00:19:33,759 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:36,066 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:38,479 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:38,696 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T00:19:38,696 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-21T00:19:40,982 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:43,589 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:44,615 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3658886116178f402a76844a19dbf457, had cached 0 bytes from a total of 31376 2024-11-21T00:19:46,020 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region eda309edd44cdafd7e34516d2725f510, had cached 0 bytes from a total of 15585 2024-11-21T00:19:46,296 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:47,425 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T00:19:48,352 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,42853,1732148328016 got entry batch from reader: null 2024-11-21T00:19:49,126 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:51,916 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-21T00:19:52,044 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:53,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:19:55,050 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:19:58,161 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:20:01,365 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:20:04,694 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:20:07,642 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:20:07,643 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:07,643 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileReplicationForConfiguredTableCfs(TestMasterReplication.java:401) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-21T00:20:07,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:07,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:07,643 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:20:07,643 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2024437287, stopped=false 2024-11-21T00:20:07,643 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,36869,1732148334327 2024-11-21T00:20:07,644 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:07,657 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1464671649/running 2024-11-21T00:20:07,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1464671649/running 2024-11-21T00:20:07,657 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:20:07,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:20:07,657 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:07,657 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:20:07,657 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileReplicationForConfiguredTableCfs(TestMasterReplication.java:401) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:07,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:07,657 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,35605,1732148334460' ***** 2024-11-21T00:20:07,658 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:20:07,658 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:20:07,658 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on znode that does not yet exist, /1-1464671649/running 2024-11-21T00:20:07,658 INFO [RS:0;5ed4808ef0e6:35605 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:20:07,658 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:20:07,658 INFO [RS:0;5ed4808ef0e6:35605 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:20:07,658 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Set watcher on znode that does not yet exist, /1-1464671649/running 2024-11-21T00:20:07,658 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(3091): Received CLOSE for eda309edd44cdafd7e34516d2725f510 2024-11-21T00:20:07,658 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,35605,1732148334460 2024-11-21T00:20:07,659 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:07,659 INFO [RS:0;5ed4808ef0e6:35605 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:35605. 2024-11-21T00:20:07,659 DEBUG [RS:0;5ed4808ef0e6:35605 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:07,659 DEBUG [RS:0;5ed4808ef0e6:35605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:07,659 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-21T00:20:07,659 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:20:07,659 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:20:07,659 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:20:07,659 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing eda309edd44cdafd7e34516d2725f510, disabling compactions & flushes 2024-11-21T00:20:07,659 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:20:07,659 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:20:07,660 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148340574.eda309edd44cdafd7e34516d2725f510. after waiting 1 ms 2024-11-21T00:20:07,660 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:20:07,660 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:20:07,660 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1325): Online Regions={eda309edd44cdafd7e34516d2725f510=test,,1732148340574.eda309edd44cdafd7e34516d2725f510., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:20:07,661 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, eda309edd44cdafd7e34516d2725f510 2024-11-21T00:20:07,661 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:07,661 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:07,661 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:07,661 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:07,661 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:07,661 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.27 KB heapSize=3.38 KB 2024-11-21T00:20:07,685 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/default/test/eda309edd44cdafd7e34516d2725f510/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-11-21T00:20:07,686 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:07,686 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:20:07,686 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:20:07,686 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for eda309edd44cdafd7e34516d2725f510: Waiting for close lock at 1732148407659Running coprocessor pre-close hooks at 1732148407659Disabling compacts and flushes for region at 1732148407659Disabling writes for close at 1732148407660 (+1 ms)Writing region close event to WAL at 1732148407661 (+1 ms)Running coprocessor post-close hooks at 1732148407686 (+25 ms)Closed at 1732148407686 2024-11-21T00:20:07,687 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148340574.eda309edd44cdafd7e34516d2725f510. 2024-11-21T00:20:07,694 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/info/06a1a1cd92b74121b0a8c64a029ec20b is 129, key is test,,1732148340574.eda309edd44cdafd7e34516d2725f510./info:regioninfo/1732148341035/Put/seqid=0 2024-11-21T00:20:07,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741839_1015 (size=6421) 2024-11-21T00:20:07,718 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.03 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/info/06a1a1cd92b74121b0a8c64a029ec20b 2024-11-21T00:20:07,736 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:07,754 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/ns/00d046374aeb44e3ac39dcdae58af87f is 43, key is default/ns:d/1732148338241/Put/seqid=0 2024-11-21T00:20:07,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741840_1016 (size=5153) 2024-11-21T00:20:07,782 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/ns/00d046374aeb44e3ac39dcdae58af87f 2024-11-21T00:20:07,820 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/rep_barrier/1d1c7de3a38c4a5894deb6b99a791e07 is 112, key is test,,1732148340574.eda309edd44cdafd7e34516d2725f510./rep_barrier:seqnumDuringOpen/1732148341035/Put/seqid=0 2024-11-21T00:20:07,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741841_1017 (size=5518) 2024-11-21T00:20:07,861 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:08,028 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:20:08,028 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:20:08,061 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:08,101 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:20:08,229 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/rep_barrier/1d1c7de3a38c4a5894deb6b99a791e07 2024-11-21T00:20:08,258 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/table/e07d07275a4943e096cf8a451cd82fb2 is 40, key is test/table:state/1732148341047/Put/seqid=0 2024-11-21T00:20:08,261 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:08,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741842_1018 (size=5165) 2024-11-21T00:20:08,352 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,42853,1732148328016 got entry batch from reader: null 2024-11-21T00:20:08,461 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:08,662 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:20:08,662 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:20:08,662 DEBUG [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:08,666 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=72 B at sequenceid=11 
(bloomFilter=true), to=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/table/e07d07275a4943e096cf8a451cd82fb2 2024-11-21T00:20:08,673 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/info/06a1a1cd92b74121b0a8c64a029ec20b as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/info/06a1a1cd92b74121b0a8c64a029ec20b 2024-11-21T00:20:08,678 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/info/06a1a1cd92b74121b0a8c64a029ec20b, entries=10, sequenceid=11, filesize=6.3 K 2024-11-21T00:20:08,679 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/ns/00d046374aeb44e3ac39dcdae58af87f as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/ns/00d046374aeb44e3ac39dcdae58af87f 2024-11-21T00:20:08,684 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/ns/00d046374aeb44e3ac39dcdae58af87f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:20:08,685 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/rep_barrier/1d1c7de3a38c4a5894deb6b99a791e07 as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/rep_barrier/1d1c7de3a38c4a5894deb6b99a791e07 2024-11-21T00:20:08,689 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/rep_barrier/1d1c7de3a38c4a5894deb6b99a791e07, entries=1, sequenceid=11, filesize=5.4 K 2024-11-21T00:20:08,690 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/.tmp/table/e07d07275a4943e096cf8a451cd82fb2 as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/table/e07d07275a4943e096cf8a451cd82fb2 2024-11-21T00:20:08,694 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/table/e07d07275a4943e096cf8a451cd82fb2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:20:08,695 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of 
dataSize ~1.27 KB/1305, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 1588230740 in 1034ms, sequenceid=11, compaction requested=false 2024-11-21T00:20:08,699 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T00:20:08,700 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:08,700 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:08,700 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:08,700 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148407661Running coprocessor pre-close hooks at 1732148407661Disabling compacts and flushes for region at 1732148407661Disabling writes for close at 1732148407661Obtaining lock to block concurrent updates at 1732148407661Preparing flush snapshotting stores in 1588230740 at 1732148407661Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1305, getHeapSize=3392, getOffHeapSize=0, getCellsCount=15 at 1732148407662 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148407662Flushing 1588230740/info: creating writer at 1732148407662Flushing 1588230740/info: appending metadata at 1732148407693 (+31 ms)Flushing 1588230740/info: closing flushed file at 1732148407693Flushing 1588230740/ns: creating writer at 1732148407731 (+38 ms)Flushing 1588230740/ns: appending metadata at 1732148407753 (+22 ms)Flushing 1588230740/ns: closing flushed file at 1732148407753Flushing 1588230740/rep_barrier: creating writer at 1732148407799 (+46 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148407819 (+20 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148407819Flushing 1588230740/table: creating writer at 1732148408235 (+416 ms)Flushing 1588230740/table: appending metadata at 1732148408257 (+22 ms)Flushing 1588230740/table: closing flushed file at 1732148408258 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58eda521: reopening flushed file at 1732148408672 (+414 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5da6bb35: reopening flushed file at 1732148408678 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@767c7de3: reopening flushed file at 1732148408684 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ef305e3: reopening flushed file at 1732148408689 (+5 ms)Finished flush of dataSize ~1.27 KB/1305, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 1588230740 in 1034ms, sequenceid=11, compaction requested=false at 1732148408695 (+6 ms)Writing region close event to WAL at 1732148408696 (+1 ms)Running coprocessor post-close hooks at 1732148408700 (+4 ms)Closed at 1732148408700 2024-11-21T00:20:08,700 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] 
handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:08,862 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,35605,1732148334460; all regions closed. 2024-11-21T00:20:08,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741834_1010 (size=2717) 2024-11-21T00:20:08,864 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460/5ed4808ef0e6%2C35605%2C1732148334460.meta.1732148338034.meta not finished, retry = 0 2024-11-21T00:20:08,967 DEBUG [RS:0;5ed4808ef0e6:35605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/oldWALs 2024-11-21T00:20:08,967 INFO [RS:0;5ed4808ef0e6:35605 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C35605%2C1732148334460.meta:.meta(num 1732148338034) 2024-11-21T00:20:08,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741833_1009 (size=1028) 2024-11-21T00:20:08,970 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/WALs/5ed4808ef0e6,35605,1732148334460/5ed4808ef0e6%2C35605%2C1732148334460.1732148337127 not finished, retry = 0 2024-11-21T00:20:09,072 DEBUG [RS:0;5ed4808ef0e6:35605 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/oldWALs 2024-11-21T00:20:09,072 INFO [RS:0;5ed4808ef0e6:35605 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C35605%2C1732148334460:(num 1732148337127) 2024-11-21T00:20:09,072 DEBUG [RS:0;5ed4808ef0e6:35605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:09,072 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:09,073 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:09,073 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:09,074 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:09,074 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:20:09,074 INFO [RS:0;5ed4808ef0e6:35605 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:35605. 
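The "Region close journal" entries above (for eda309edd44cdafd7e34516d2725f510 and 1588230740) pack the whole close sequence, with per-step timestamps and "(+N ms)" deltas, into a single string. Here is a rough sketch of a parser for that format, assuming the "step at epochMillis (+N ms)" layout seen in those two journals; this is illustrative tooling, not HBase code.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    /** Illustrative parser for the "step at <epochMillis> (+N ms)" journal format. */
    public final class CloseJournalTimings {
      private static final Pattern STEP =
          Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?");

      public static Map<String, Long> parse(String journal) {
        Map<String, Long> steps = new LinkedHashMap<>();
        Matcher m = STEP.matcher(journal);
        while (m.find()) {
          steps.put(m.group(1).trim(), Long.parseLong(m.group(2)));
        }
        return steps;
      }

      public static void main(String[] args) {
        // Abbreviated from the 1588230740 journal above.
        String journal = "Waiting for close lock at 1732148407661"
            + "Running coprocessor pre-close hooks at 1732148407661"
            + "Disabling writes for close at 1732148407661"
            + "Closed at 1732148408700";
        long previous = -1L;
        for (Map.Entry<String, Long> e : parse(journal).entrySet()) {
          long delta = previous < 0 ? 0 : e.getValue() - previous;
          System.out.printf("%-40s %d (+%d ms)%n", e.getKey(), e.getValue(), delta);
          previous = e.getValue();
        }
      }
    }
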
2024-11-21T00:20:09,074 DEBUG [RS:0;5ed4808ef0e6:35605 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:09,074 DEBUG [RS:0;5ed4808ef0e6:35605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:09,074 DEBUG [RS:0;5ed4808ef0e6:35605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:09,074 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:20:09,074 INFO [RS:0;5ed4808ef0e6:35605 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35605 2024-11-21T00:20:09,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649/rs 2024-11-21T00:20:09,088 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1464671649/rs/5ed4808ef0e6,35605,1732148334460 2024-11-21T00:20:09,088 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:09,099 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,35605,1732148334460] 2024-11-21T00:20:09,109 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-1464671649/draining/5ed4808ef0e6,35605,1732148334460 already deleted, retry=false 2024-11-21T00:20:09,109 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,35605,1732148334460 expired; onlineServers=0 2024-11-21T00:20:09,109 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,36869,1732148334327' ***** 2024-11-21T00:20:09,109 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:20:09,109 INFO [M:0;5ed4808ef0e6:36869 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:09,109 INFO [M:0;5ed4808ef0e6:36869 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:09,110 DEBUG [M:0;5ed4808ef0e6:36869 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:20:09,110 DEBUG [M:0;5ed4808ef0e6:36869 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:20:09,110 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
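The RegionServerTracker lines above react to the NodeDeleted event for /1-1464671649/rs/5ed4808ef0e6,35605,1732148334460: the region server registers an ephemeral znode under the rs parent, and when its ZooKeeper session ends the node's deletion is what tells the master to process expiration. A bare-bones sketch of that pattern with the plain ZooKeeper client follows; the quorum string and path are copied from the log, and this is not the actual RegionServerTracker implementation.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    /** Sketch of the ephemeral-node watch behind the RegionServerTracker lines. */
    public class RsZNodeWatch {
      public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:58140";
        String rsZNode = "/1-1464671649/rs/5ed4808ef0e6,35605,1732148334460";
        CountDownLatch deleted = new CountDownLatch(1);

        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && rsZNode.equals(event.getPath())) {
            System.out.println("RegionServer ephemeral node deleted: " + event.getPath());
            deleted.countDown();
          }
        };

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
        zk.exists(rsZNode, watcher);   // checks the node and arms a one-shot deletion watch
        deleted.await();
        zk.close();
      }
    }
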
2024-11-21T00:20:09,110 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148336720 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148336720,5,FailOnTimeoutGroup] 2024-11-21T00:20:09,110 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148336725 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148336725,5,FailOnTimeoutGroup] 2024-11-21T00:20:09,110 INFO [M:0;5ed4808ef0e6:36869 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:09,110 INFO [M:0;5ed4808ef0e6:36869 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:09,110 DEBUG [M:0;5ed4808ef0e6:36869 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:20:09,110 INFO [M:0;5ed4808ef0e6:36869 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:20:09,110 INFO [M:0;5ed4808ef0e6:36869 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:09,110 INFO [M:0;5ed4808ef0e6:36869 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:20:09,110 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:20:09,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1464671649/master 2024-11-21T00:20:09,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1464671649 2024-11-21T00:20:09,120 DEBUG [M:0;5ed4808ef0e6:36869 {}] zookeeper.ZKUtil(347): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Unable to get data of znode /1-1464671649/master because node does not exist (not an error) 2024-11-21T00:20:09,120 WARN [M:0;5ed4808ef0e6:36869 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:20:09,121 INFO [M:0;5ed4808ef0e6:36869 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/.lastflushedseqids 2024-11-21T00:20:09,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741843_1019 (size=173) 2024-11-21T00:20:09,199 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:09,199 DEBUG [pool-688-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35605-0x1015ac1e6590004, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:09,199 INFO [RS:0;5ed4808ef0e6:35605 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:09,199 INFO [RS:0;5ed4808ef0e6:35605 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,35605,1732148334460; 
zookeeper connection closed. 2024-11-21T00:20:09,199 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7f4fa7b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7f4fa7b 2024-11-21T00:20:09,200 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:20:09,526 INFO [M:0;5ed4808ef0e6:36869 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:20:09,526 INFO [M:0;5ed4808ef0e6:36869 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:20:09,526 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:09,526 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:09,526 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:09,526 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:09,526 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:09,526 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=28.05 KB heapSize=34.16 KB 2024-11-21T00:20:09,541 DEBUG [M:0;5ed4808ef0e6:36869 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e48ea6dbb1c74566843a6e7d860db8aa is 82, key is hbase:meta,,1/info:regioninfo/1732148338207/Put/seqid=0 2024-11-21T00:20:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741844_1020 (size=5672) 2024-11-21T00:20:09,946 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e48ea6dbb1c74566843a6e7d860db8aa 2024-11-21T00:20:09,964 DEBUG [M:0;5ed4808ef0e6:36869 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/07b655a59c62454e8c0f468374c1d175 is 1246, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732148341051/Put/seqid=0 2024-11-21T00:20:09,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741845_1021 (size=6727) 2024-11-21T00:20:10,370 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.50 KB at sequenceid=58 (bloomFilter=true), 
to=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/07b655a59c62454e8c0f468374c1d175 2024-11-21T00:20:10,375 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 07b655a59c62454e8c0f468374c1d175 2024-11-21T00:20:10,388 DEBUG [M:0;5ed4808ef0e6:36869 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d417bca320234a479787b3a9d88a0e0a is 69, key is 5ed4808ef0e6,35605,1732148334460/rs:state/1732148336821/Put/seqid=0 2024-11-21T00:20:10,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741846_1022 (size=5156) 2024-11-21T00:20:10,793 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d417bca320234a479787b3a9d88a0e0a 2024-11-21T00:20:10,799 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e48ea6dbb1c74566843a6e7d860db8aa as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e48ea6dbb1c74566843a6e7d860db8aa 2024-11-21T00:20:10,803 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e48ea6dbb1c74566843a6e7d860db8aa, entries=8, sequenceid=58, filesize=5.5 K 2024-11-21T00:20:10,804 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/07b655a59c62454e8c0f468374c1d175 as hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/07b655a59c62454e8c0f468374c1d175 2024-11-21T00:20:10,808 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 07b655a59c62454e8c0f468374c1d175 2024-11-21T00:20:10,808 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/07b655a59c62454e8c0f468374c1d175, entries=6, sequenceid=58, filesize=6.6 K 2024-11-21T00:20:10,809 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d417bca320234a479787b3a9d88a0e0a as 
hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d417bca320234a479787b3a9d88a0e0a 2024-11-21T00:20:10,813 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34125/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d417bca320234a479787b3a9d88a0e0a, entries=1, sequenceid=58, filesize=5.0 K 2024-11-21T00:20:10,814 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(3140): Finished flush of dataSize ~28.05 KB/28727, heapSize ~33.87 KB/34680, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1288ms, sequenceid=58, compaction requested=false 2024-11-21T00:20:10,815 INFO [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:10,815 DEBUG [M:0;5ed4808ef0e6:36869 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148409526Disabling compacts and flushes for region at 1732148409526Disabling writes for close at 1732148409526Obtaining lock to block concurrent updates at 1732148409526Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148409526Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=28727, getHeapSize=34920, getOffHeapSize=0, getCellsCount=69 at 1732148409527 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148409527Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148409527Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148409540 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148409540Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148409950 (+410 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148409964 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148409964Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148410375 (+411 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148410388 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148410388Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d6eebd7: reopening flushed file at 1732148410799 (+411 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@785da796: reopening flushed file at 1732148410803 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ec8e621: reopening flushed file at 1732148410808 (+5 ms)Finished flush of dataSize ~28.05 KB/28727, heapSize ~33.87 KB/34680, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1288ms, sequenceid=58, compaction requested=false at 1732148410814 (+6 ms)Writing region close event to WAL at 1732148410815 (+1 ms)Closed at 1732148410815 2024-11-21T00:20:10,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35783 is added to blk_1073741830_1006 (size=33026) 2024-11-21T00:20:10,818 INFO [M:0;5ed4808ef0e6:36869 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:20:10,818 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
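The "Committing .../.tmp/<family>/<file> as .../<family>/<file>" lines above (for both the meta region and the master local region) follow the usual write-to-temp-then-rename pattern: the flusher writes the HFile under the region's .tmp directory, and the commit is a filesystem rename into the store directory. Below is a hedged sketch of that step with the Hadoop FileSystem API, using one of the paths from the log; the real logic lives in HRegionFileSystem and does more validation than shown here.

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Sketch of the ".tmp then rename" commit step; not HRegionFileSystem itself. */
    public class TmpCommitSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34125"), conf);

        String store = "/user/jenkins/test-data/6371018c-4484-b9d5-4174-ab2aa487405b"
            + "/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682";
        Path tmp = new Path(store + "/.tmp/rs/d417bca320234a479787b3a9d88a0e0a");
        Path committed = new Path(store + "/rs/d417bca320234a479787b3a9d88a0e0a");

        // The flushed file only becomes visible to readers once this rename succeeds.
        if (!fs.rename(tmp, committed)) {
          throw new IOException("commit failed for " + tmp);
        }
      }
    }
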
2024-11-21T00:20:10,818 INFO [M:0;5ed4808ef0e6:36869 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36869 2024-11-21T00:20:10,818 INFO [M:0;5ed4808ef0e6:36869 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:10,988 INFO [M:0;5ed4808ef0e6:36869 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:10,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:10,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36869-0x1015ac1e6590003, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:11,010 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fbfa30f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:11,010 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27bec7a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:11,011 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:11,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25ed74cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:11,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c54674c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:11,012 WARN [BP-1240653071-172.17.0.2-1732148331766 heartbeating to localhost/127.0.0.1:34125 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:20:11,012 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:20:11,012 WARN [BP-1240653071-172.17.0.2-1732148331766 heartbeating to localhost/127.0.0.1:34125 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1240653071-172.17.0.2-1732148331766 (Datanode Uuid d593947b-8443-4d6c-bf83-d72092d08959) service to localhost/127.0.0.1:34125 2024-11-21T00:20:11,012 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:20:11,013 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/cluster_f839d63c-e580-846d-0cf4-5a2d6877035b/data/data1/current/BP-1240653071-172.17.0.2-1732148331766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:11,013 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/cluster_f839d63c-e580-846d-0cf4-5a2d6877035b/data/data2/current/BP-1240653071-172.17.0.2-1732148331766 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:11,013 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:20:11,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@32cecb8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:11,019 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6003bd66{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:11,019 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:11,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@221cfbfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:11,019 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@346e4b0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:11,032 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:20:11,032 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:20:11,032 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:20:11,032 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileReplicationForConfiguredTableCfs(TestMasterReplication.java:401) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:11,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:11,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:11,032 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
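The call stack above ends in TestMasterReplication.shutDownMiniClusters invoking HBaseTestingUtil.shutdownMiniCluster, which is what produced the teardown log in this section. For orientation, here is a sketch of the usual JUnit 4 lifecycle around that call; shutdownMiniCluster appears in the stack trace itself, while startMiniCluster and the exact wiring are assumed typical usage rather than copied from the test.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    /** Sketch of the mini-cluster lifecycle behind the shutdown log above. */
    public class MiniClusterLifecycleSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUp() throws Exception {
        UTIL.startMiniCluster();      // HDFS + ZooKeeper + master + one region server
      }

      @AfterClass
      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();   // produces a teardown sequence like the one logged here
      }
    }
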
2024-11-21T00:20:11,032 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:20:11,033 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1184778891, stopped=false 2024-11-21T00:20:11,033 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,34855,1732148327840 2024-11-21T00:20:11,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0891878329/running 2024-11-21T00:20:11,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0891878329/running 2024-11-21T00:20:11,053 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:11,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:20:11,053 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:20:11,053 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:11,054 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileReplicationForConfiguredTableCfs(TestMasterReplication.java:401) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:11,054 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:11,054 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on znode that does not yet exist, /0891878329/running 2024-11-21T00:20:11,054 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Set watcher on znode that does not yet exist, /0891878329/running 2024-11-21T00:20:11,054 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,42853,1732148328016' ***** 2024-11-21T00:20:11,054 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:20:11,054 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:20:11,054 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(3091): Received CLOSE for 3658886116178f402a76844a19dbf457 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(3091): Received CLOSE for d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,42853,1732148328016 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:42853. 2024-11-21T00:20:11,055 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3658886116178f402a76844a19dbf457, disabling compactions & flushes 2024-11-21T00:20:11,055 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:20:11,055 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:20:11,055 DEBUG [RS:0;5ed4808ef0e6:42853 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:11,055 DEBUG [RS:0;5ed4808ef0e6:42853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:11,055 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148338397.3658886116178f402a76844a19dbf457. after waiting 0 ms 2024-11-21T00:20:11,055 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:20:11,055 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:20:11,055 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1325): Online Regions={3658886116178f402a76844a19dbf457=test,,1732148338397.3658886116178f402a76844a19dbf457., d2edabe9f0a5eaaf9af81acd5798eca0=hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:20:11,056 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 3658886116178f402a76844a19dbf457, d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:20:11,056 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:11,056 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:11,056 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:11,056 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:11,056 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:11,056 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:20:11,059 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/default/test/3658886116178f402a76844a19dbf457/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:20:11,060 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148338397.3658886116178f402a76844a19dbf457. 
2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3658886116178f402a76844a19dbf457: Waiting for close lock at 1732148411055Running coprocessor pre-close hooks at 1732148411055Disabling compacts and flushes for region at 1732148411055Disabling writes for close at 1732148411055Writing region close event to WAL at 1732148411056 (+1 ms)Running coprocessor post-close hooks at 1732148411060 (+4 ms)Closed at 1732148411060 2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148338397.3658886116178f402a76844a19dbf457. 2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d2edabe9f0a5eaaf9af81acd5798eca0, disabling compactions & flushes 2024-11-21T00:20:11,060 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. after waiting 0 ms 2024-11-21T00:20:11,060 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 
2024-11-21T00:20:11,060 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d2edabe9f0a5eaaf9af81acd5798eca0 3/3 column families, dataSize=874 B heapSize=2.19 KB 2024-11-21T00:20:11,073 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/info/b24b299f95764a6489abebb3db8b6fc4 is 147, key is hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0./info:regioninfo/1732148342606/Put/seqid=0 2024-11-21T00:20:11,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741844_1020 (size=7686) 2024-11-21T00:20:11,079 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/.tmp/hfileref/b22aff096b6240c580716da9c5e5cd8e is 74, key is 1/hfileref:3821e71e8fa9414a978e7c54a1ed9880_SeqId_4_/1732148345847/DeleteColumn/seqid=0 2024-11-21T00:20:11,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741845_1021 (size=5203) 2024-11-21T00:20:11,114 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:11,256 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:20:11,334 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:20:11,334 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:20:11,456 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:20:11,478 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/info/b24b299f95764a6489abebb3db8b6fc4 2024-11-21T00:20:11,484 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=280 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/.tmp/hfileref/b22aff096b6240c580716da9c5e5cd8e 2024-11-21T00:20:11,499 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/ns/0de5fbcd457b42c784a35dcd679199eb is 43, key is default/ns:d/1732148331582/Put/seqid=0 2024-11-21T00:20:11,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741846_1022 (size=5153) 2024-11-21T00:20:11,507 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/.tmp/queue/fbe4cb1907d04f03b3caa5e172d28289 is 153, key is 1-5ed4808ef0e6,42853,1732148328016/queue:5ed4808ef0e6%2C42853%2C1732148328016/1732148348349/Put/seqid=0 2024-11-21T00:20:11,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741847_1023 (size=5352) 2024-11-21T00:20:11,605 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 997, reset compression=false 2024-11-21T00:20:11,609 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:20:11,609 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 1292, reset compression=false 2024-11-21T00:20:11,609 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,42853,1732148328016 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466, lastWalPosition=1292, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:20:11,612 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,42853,1732148328016: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,42853,1732148328016, walGroup=5ed4808ef0e6%2C42853%2C1732148328016, offset=5ed4808ef0e6%2C42853%2C1732148328016.1732148330466:1292, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:42853 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:20:11,614 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:20:11,617 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:20:11,618 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:20:11,618 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:20:11,618 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:20:11,618 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": 
"sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1061158912, "init": 1048576000, "max": 2306867200, "used": 446431440 }, "NonHeapMemoryUsage": { "committed": 180617216, "init": 7667712, "max": -1, "used": 175308504 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:20:11,622 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:34855 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:34855 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-21T00:20:11,656 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:20:11,813 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 1292, reset compression=false 2024-11-21T00:20:11,856 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d2edabe9f0a5eaaf9af81acd5798eca0 2024-11-21T00:20:11,907 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/ns/0de5fbcd457b42c784a35dcd679199eb 2024-11-21T00:20:11,911 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=594 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/.tmp/queue/fbe4cb1907d04f03b3caa5e172d28289 2024-11-21T00:20:11,918 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/.tmp/hfileref/b22aff096b6240c580716da9c5e5cd8e as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/hfileref/b22aff096b6240c580716da9c5e5cd8e 2024-11-21T00:20:11,923 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/hfileref/b22aff096b6240c580716da9c5e5cd8e, entries=2, sequenceid=10, filesize=5.1 K 2024-11-21T00:20:11,925 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/.tmp/queue/fbe4cb1907d04f03b3caa5e172d28289 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/queue/fbe4cb1907d04f03b3caa5e172d28289 2024-11-21T00:20:11,930 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/queue/fbe4cb1907d04f03b3caa5e172d28289, entries=1, sequenceid=10, filesize=5.2 K 2024-11-21T00:20:11,931 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~874 B/874, heapSize 
~1.91 KB/1952, currentSize=0 B/0 for d2edabe9f0a5eaaf9af81acd5798eca0 in 871ms, sequenceid=10, compaction requested=false 2024-11-21T00:20:11,934 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/rep_barrier/8102eaa92ce147579f6ca4b484acf300 is 112, key is test,,1732148338397.3658886116178f402a76844a19dbf457./rep_barrier:seqnumDuringOpen/1732148339698/Put/seqid=0 2024-11-21T00:20:11,936 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/replication/d2edabe9f0a5eaaf9af81acd5798eca0/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=1 2024-11-21T00:20:11,937 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:11,937 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:11,937 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:20:11,938 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d2edabe9f0a5eaaf9af81acd5798eca0: Waiting for close lock at 1732148411060Running coprocessor pre-close hooks at 1732148411060Disabling compacts and flushes for region at 1732148411060Disabling writes for close at 1732148411060Obtaining lock to block concurrent updates at 1732148411060Preparing flush snapshotting stores in d2edabe9f0a5eaaf9af81acd5798eca0 at 1732148411060Finished memstore snapshotting hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0., syncing WAL and waiting on mvcc, flushsize=dataSize=874, getHeapSize=2192, getOffHeapSize=0, getCellsCount=8 at 1732148411061 (+1 ms)Flushing stores of hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 
at 1732148411061Flushing d2edabe9f0a5eaaf9af81acd5798eca0/hfileref: creating writer at 1732148411061Flushing d2edabe9f0a5eaaf9af81acd5798eca0/hfileref: appending metadata at 1732148411079 (+18 ms)Flushing d2edabe9f0a5eaaf9af81acd5798eca0/hfileref: closing flushed file at 1732148411079Flushing d2edabe9f0a5eaaf9af81acd5798eca0/queue: creating writer at 1732148411489 (+410 ms)Flushing d2edabe9f0a5eaaf9af81acd5798eca0/queue: appending metadata at 1732148411506 (+17 ms)Flushing d2edabe9f0a5eaaf9af81acd5798eca0/queue: closing flushed file at 1732148411506Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b9848ab: reopening flushed file at 1732148411917 (+411 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2823dabe: reopening flushed file at 1732148411923 (+6 ms)Finished flush of dataSize ~874 B/874, heapSize ~1.91 KB/1952, currentSize=0 B/0 for d2edabe9f0a5eaaf9af81acd5798eca0 in 871ms, sequenceid=10, compaction requested=false at 1732148411931 (+8 ms)Writing region close event to WAL at 1732148411932 (+1 ms)Running coprocessor post-close hooks at 1732148411937 (+5 ms)Closed at 1732148411937 2024-11-21T00:20:11,938 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148341242.d2edabe9f0a5eaaf9af81acd5798eca0. 2024-11-21T00:20:11,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741848_1024 (size=5518) 2024-11-21T00:20:12,056 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:20:12,057 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:20:12,057 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:12,117 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 1292, reset compression=false 2024-11-21T00:20:12,257 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:12,342 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/rep_barrier/8102eaa92ce147579f6ca4b484acf300 2024-11-21T00:20:12,363 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/table/20efe3e46b194a17954a91a4d226bfc7 is 53, key is hbase:replication/table:state/1732148342631/Put/seqid=0 2024-11-21T00:20:12,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741849_1025 (size=5308) 2024-11-21T00:20:12,457 DEBUG [RS:0;5ed4808ef0e6:42853 {}] 
regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:12,521 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 1292, reset compression=false 2024-11-21T00:20:12,657 DEBUG [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:12,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:12,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:12,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:12,711 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:12,770 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/table/20efe3e46b194a17954a91a4d226bfc7 2024-11-21T00:20:12,775 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/info/b24b299f95764a6489abebb3db8b6fc4 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/info/b24b299f95764a6489abebb3db8b6fc4 2024-11-21T00:20:12,780 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/info/b24b299f95764a6489abebb3db8b6fc4, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:20:12,781 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/ns/0de5fbcd457b42c784a35dcd679199eb as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/ns/0de5fbcd457b42c784a35dcd679199eb 2024-11-21T00:20:12,787 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/ns/0de5fbcd457b42c784a35dcd679199eb, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:20:12,788 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/rep_barrier/8102eaa92ce147579f6ca4b484acf300 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/rep_barrier/8102eaa92ce147579f6ca4b484acf300 2024-11-21T00:20:12,795 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/rep_barrier/8102eaa92ce147579f6ca4b484acf300, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:20:12,796 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/.tmp/table/20efe3e46b194a17954a91a4d226bfc7 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/table/20efe3e46b194a17954a91a4d226bfc7 2024-11-21T00:20:12,803 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/table/20efe3e46b194a17954a91a4d226bfc7, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:20:12,804 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1748ms, sequenceid=16, compaction requested=false 2024-11-21T00:20:12,804 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:20:12,810 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:20:12,810 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:12,810 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:12,811 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:12,811 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148411056Running coprocessor pre-close hooks at 1732148411056Disabling compacts and flushes for region at 1732148411056Disabling writes for close at 1732148411056Obtaining lock to block concurrent updates at 1732148411056Preparing flush snapshotting stores in 1588230740 at 1732148411056Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, 
getOffHeapSize=0, getCellsCount=27 at 1732148411056Flushing stores of hbase:meta,,1.1588230740 at 1732148411057 (+1 ms)Flushing 1588230740/info: creating writer at 1732148411057Flushing 1588230740/info: appending metadata at 1732148411073 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732148411073Flushing 1588230740/ns: creating writer at 1732148411485 (+412 ms)Flushing 1588230740/ns: appending metadata at 1732148411498 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732148411498Flushing 1588230740/rep_barrier: creating writer at 1732148411913 (+415 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148411933 (+20 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148411934 (+1 ms)Flushing 1588230740/table: creating writer at 1732148412348 (+414 ms)Flushing 1588230740/table: appending metadata at 1732148412362 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732148412362Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@481ff2cc: reopening flushed file at 1732148412774 (+412 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ca656ca: reopening flushed file at 1732148412780 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39c29af2: reopening flushed file at 1732148412787 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@676f7e41: reopening flushed file at 1732148412795 (+8 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1748ms, sequenceid=16, compaction requested=false at 1732148412804 (+9 ms)Writing region close event to WAL at 1732148412806 (+2 ms)Running coprocessor post-close hooks at 1732148412810 (+4 ms)Closed at 1732148412810 2024-11-21T00:20:12,811 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:12,857 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,42853,1732148328016; all regions closed. 
2024-11-21T00:20:12,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:20:12,860 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.meta.1732148331471.meta not finished, retry = 0 2024-11-21T00:20:12,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741839_1015 (size=2700) 2024-11-21T00:20:12,963 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.rep.1732148342469 not finished, retry = 0 2024-11-21T00:20:13,025 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 to pos 1292, reset compression=false 2024-11-21T00:20:13,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741832_1008 (size=1300) 2024-11-21T00:20:13,067 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/WALs/5ed4808ef0e6,42853,1732148328016/5ed4808ef0e6%2C42853%2C1732148328016.1732148330466 not finished, retry = 0 2024-11-21T00:20:13,168 DEBUG [RS:0;5ed4808ef0e6:42853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:13,168 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:13,169 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:13,169 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:13,169 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:13,169 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:20:13,169 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,42853,1732148328016 because: Region server is closing 2024-11-21T00:20:13,169 INFO [RS:0;5ed4808ef0e6:42853 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:42853. 
2024-11-21T00:20:13,169 DEBUG [RS:0;5ed4808ef0e6:42853 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:13,170 DEBUG [RS:0;5ed4808ef0e6:42853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:13,170 DEBUG [RS:0;5ed4808ef0e6:42853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:13,170 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:13,222 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:13,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:13,252 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:13,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:13,253 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:13,270 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.shipper5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 terminated 2024-11-21T00:20:13,270 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,42853,1732148328016.replicationSource.wal-reader.5ed4808ef0e6%2C42853%2C1732148328016,1-5ed4808ef0e6,42853,1732148328016 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:20:13,270 INFO [RS:0;5ed4808ef0e6:42853 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42853 2024-11-21T00:20:13,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0891878329/rs/5ed4808ef0e6,42853,1732148328016 2024-11-21T00:20:13,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329/rs 2024-11-21T00:20:13,285 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:13,298 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,42853,1732148328016] 2024-11-21T00:20:13,309 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /0891878329/draining/5ed4808ef0e6,42853,1732148328016 already deleted, retry=false 2024-11-21T00:20:13,309 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,42853,1732148328016 expired; onlineServers=0 2024-11-21T00:20:13,309 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,34855,1732148327840' ***** 2024-11-21T00:20:13,309 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:20:13,309 INFO [M:0;5ed4808ef0e6:34855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:13,309 INFO [M:0;5ed4808ef0e6:34855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:13,309 DEBUG [M:0;5ed4808ef0e6:34855 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:20:13,309 DEBUG [M:0;5ed4808ef0e6:34855 
{}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:20:13,309 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-21T00:20:13,309 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148330272 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148330272,5,FailOnTimeoutGroup] 2024-11-21T00:20:13,310 INFO [M:0;5ed4808ef0e6:34855 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:13,310 INFO [M:0;5ed4808ef0e6:34855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:13,310 DEBUG [M:0;5ed4808ef0e6:34855 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:20:13,310 INFO [M:0;5ed4808ef0e6:34855 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:20:13,310 INFO [M:0;5ed4808ef0e6:34855 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:13,310 INFO [M:0;5ed4808ef0e6:34855 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:20:13,310 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148330268 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148330268,5,FailOnTimeoutGroup] 2024-11-21T00:20:13,312 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:20:13,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0891878329/master 2024-11-21T00:20:13,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0891878329 2024-11-21T00:20:13,320 DEBUG [M:0;5ed4808ef0e6:34855 {}] zookeeper.ZKUtil(347): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Unable to get data of znode /0891878329/master because node does not exist (not an error) 2024-11-21T00:20:13,320 WARN [M:0;5ed4808ef0e6:34855 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:20:13,321 INFO [M:0;5ed4808ef0e6:34855 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/.lastflushedseqids 2024-11-21T00:20:13,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741850_1026 (size=263) 2024-11-21T00:20:13,334 INFO [M:0;5ed4808ef0e6:34855 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:20:13,334 INFO [M:0;5ed4808ef0e6:34855 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:20:13,334 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & 
flushes 2024-11-21T00:20:13,334 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:13,334 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:13,335 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:13,335 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:13,335 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.85 KB heapSize=65.86 KB 2024-11-21T00:20:13,353 DEBUG [M:0;5ed4808ef0e6:34855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ff9e949f6c64901b6fd82984e99dcff is 82, key is hbase:meta,,1/info:regioninfo/1732148331505/Put/seqid=0 2024-11-21T00:20:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741851_1027 (size=5672) 2024-11-21T00:20:13,361 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ff9e949f6c64901b6fd82984e99dcff 2024-11-21T00:20:13,384 DEBUG [M:0;5ed4808ef0e6:34855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8a69a737fedf4921841daf387412002f is 1246, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732148339733/Put/seqid=0 2024-11-21T00:20:13,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741852_1028 (size=7240) 2024-11-21T00:20:13,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:13,399 INFO [RS:0;5ed4808ef0e6:42853 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:13,399 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42853-0x1015ac1e6590001, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:13,399 INFO [RS:0;5ed4808ef0e6:42853 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,42853,1732148328016; zookeeper connection closed. 
2024-11-21T00:20:13,399 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@784cad37 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@784cad37 2024-11-21T00:20:13,399 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:20:13,789 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.30 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8a69a737fedf4921841daf387412002f 2024-11-21T00:20:13,794 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8a69a737fedf4921841daf387412002f 2024-11-21T00:20:13,813 DEBUG [M:0;5ed4808ef0e6:34855 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6589dbcc2813417e9bb9b44dcf52f428 is 69, key is 5ed4808ef0e6,42853,1732148328016/rs:state/1732148330303/Put/seqid=0 2024-11-21T00:20:13,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741853_1029 (size=5156) 2024-11-21T00:20:13,817 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6589dbcc2813417e9bb9b44dcf52f428 2024-11-21T00:20:13,822 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1ff9e949f6c64901b6fd82984e99dcff as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1ff9e949f6c64901b6fd82984e99dcff 2024-11-21T00:20:13,826 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1ff9e949f6c64901b6fd82984e99dcff, entries=8, sequenceid=105, filesize=5.5 K 2024-11-21T00:20:13,827 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8a69a737fedf4921841daf387412002f as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8a69a737fedf4921841daf387412002f 2024-11-21T00:20:13,833 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8a69a737fedf4921841daf387412002f 2024-11-21T00:20:13,833 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8a69a737fedf4921841daf387412002f, entries=11, sequenceid=105, filesize=7.1 K 2024-11-21T00:20:13,834 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6589dbcc2813417e9bb9b44dcf52f428 as hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6589dbcc2813417e9bb9b44dcf52f428 2024-11-21T00:20:13,839 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35003/user/jenkins/test-data/5fb5fb98-d97f-2bf0-e103-4b0527e603da/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6589dbcc2813417e9bb9b44dcf52f428, entries=1, sequenceid=105, filesize=5.0 K 2024-11-21T00:20:13,840 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.85 KB/57195, heapSize ~65.56 KB/67136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 505ms, sequenceid=105, compaction requested=false 2024-11-21T00:20:13,844 INFO [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:13,844 DEBUG [M:0;5ed4808ef0e6:34855 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148413334Disabling compacts and flushes for region at 1732148413334Disabling writes for close at 1732148413335 (+1 ms)Obtaining lock to block concurrent updates at 1732148413335Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148413335Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=57195, getHeapSize=67376, getOffHeapSize=0, getCellsCount=122 at 1732148413335Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148413336 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148413336Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148413353 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148413353Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148413366 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148413383 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148413383Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148413794 (+411 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148413812 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148413812Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e9f33c7: reopening flushed file at 1732148413821 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@500cd0f1: reopening flushed file at 1732148413826 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42475339: reopening flushed file at 1732148413833 (+7 ms)Finished flush of dataSize ~55.85 KB/57195, heapSize ~65.56 KB/67136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 505ms, sequenceid=105, compaction requested=false at 1732148413840 (+7 ms)Writing region close event to WAL at 1732148413844 (+4 ms)Closed at 1732148413844 2024-11-21T00:20:13,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38023 is added to blk_1073741830_1006 (size=64526) 2024-11-21T00:20:13,847 INFO [M:0;5ed4808ef0e6:34855 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:20:13,848 INFO [M:0;5ed4808ef0e6:34855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34855 2024-11-21T00:20:13,848 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:20:13,848 INFO [M:0;5ed4808ef0e6:34855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:14,020 INFO [M:0;5ed4808ef0e6:34855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:14,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:14,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34855-0x1015ac1e6590000, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:14,069 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9df7998{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:14,070 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ecb522c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:14,070 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:14,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35e7e964{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:14,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fc1d38b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:14,071 WARN [BP-1033271834-172.17.0.2-1732148325074 heartbeating to localhost/127.0.0.1:35003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:20:14,071 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:20:14,071 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:20:14,071 WARN [BP-1033271834-172.17.0.2-1732148325074 heartbeating to localhost/127.0.0.1:35003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1033271834-172.17.0.2-1732148325074 (Datanode Uuid 9111521d-3041-4a5e-b511-c11db812c5e1) service to localhost/127.0.0.1:35003 2024-11-21T00:20:14,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8/data/data1/current/BP-1033271834-172.17.0.2-1732148325074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:14,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/cluster_e278b697-4081-aebf-3e13-48025d4956e8/data/data2/current/BP-1033271834-172.17.0.2-1732148325074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:14,072 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:20:14,077 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@358a93e0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:14,077 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@59561a4d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:14,077 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:14,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69ee97df{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:14,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bcf2990{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/c27233af-35ee-5299-0ed8-4bd3eeb29b81/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:14,083 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:20:14,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:20:14,108 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testHFileReplicationForConfiguredTableCfs Thread=158 (was 136) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35003 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-10-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:34125 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-11-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:34125 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection 
to localhost/127.0.0.1:34125 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Registry-endpoints-refresh-end-points java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:58140) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:58140) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) 
app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: nioEventLoopGroup-10-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-10-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:34125 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:35003 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34125 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35003 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Registry-endpoints-refresh-end-points java.base@17.0.11/java.lang.Object.wait(Native 
Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34125 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-11-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=503 (was 464) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=886 (was 715) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=662 (was 2076) 2024-11-21T00:20:14,114 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testBasePeerConfigsRemovalForReplicationPeer Thread=159, OpenFileDescriptor=503, MaxFileDescriptor=1048576, SystemLoadAverage=886, ProcessCount=11, AvailableMemoryMB=662 2024-11-21T00:20:14,126 INFO [Time-limited test {}] replication.TestMasterReplication(517): testBasePeerConfigsForPeerMutations 2024-11-21T00:20:14,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.log.dir so I do NOT create it in target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740 2024-11-21T00:20:14,127 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:20:14,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.tmp.dir so I do NOT create it in target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740 2024-11-21T00:20:14,127 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/7c659c91-e08f-d13b-6acd-b85d54f51991/hadoop.tmp.dir Erasing configuration value by system value. 
2024-11-21T00:20:14,127 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740 2024-11-21T00:20:14,127 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2, deleteOnExit=true 2024-11-21T00:20:14,130 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2/zookeeper_0, clientPort=62031, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:20:14,130 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62031 2024-11-21T00:20:14,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:20:14,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/test.cache.data in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/mapreduce.cluster.temp.dir in system properties and HBase conf 
2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:20:14,131 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:20:14,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:14,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:14,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:20:14,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/nfs.dump.dir in system 
properties and HBase conf 2024-11-21T00:20:14,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:20:14,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:14,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:20:14,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:20:14,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015ac1e6590005, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:20:14,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015ac1e6590002, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:20:14,181 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015ac1e6590005, quorum=127.0.0.1:58140, baseZNode=/1-1464671649 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:20:14,181 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015ac1e6590002, quorum=127.0.0.1:58140, baseZNode=/0891878329 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:20:14,446 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:14,451 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:14,456 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:14,456 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:14,456 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:20:14,457 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:14,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@39c4d09a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:14,458 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a802bd1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:14,561 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60f47f1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/java.io.tmpdir/jetty-localhost-38213-hadoop-hdfs-3_4_1-tests_jar-_-any-16412348784432460876/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:14,562 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1341e1bf{HTTP/1.1, (http/1.1)}{localhost:38213} 2024-11-21T00:20:14,562 INFO [Time-limited test {}] server.Server(415): Started @149112ms 2024-11-21T00:20:14,902 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:14,906 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:14,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:14,907 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:14,907 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:20:14,908 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@58ef04de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:14,909 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2918976{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:15,007 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4dabdac5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/java.io.tmpdir/jetty-localhost-42647-hadoop-hdfs-3_4_1-tests_jar-_-any-17316811265421952837/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:15,007 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6de60506{HTTP/1.1, (http/1.1)}{localhost:42647} 2024-11-21T00:20:15,007 INFO [Time-limited test {}] server.Server(415): Started @149557ms 2024-11-21T00:20:15,009 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:20:15,651 WARN [Thread-1291 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2/data/data1/current/BP-1803312530-172.17.0.2-1732148414147/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:15,651 WARN [Thread-1292 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2/data/data2/current/BP-1803312530-172.17.0.2-1732148414147/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:15,667 WARN [Thread-1279 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:20:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14f9bb50ad28b40f with lease ID 0xe6e65ce657aeb1c6: Processing first storage report for DS-98472a23-70a4-484d-9047-16e1f0420ee3 from datanode DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a76a2ea5-fea7-45d7-9483-ecc9530ddbb1, infoPort=33805, infoSecurePort=0, ipcPort=37333, storageInfo=lv=-57;cid=testClusterID;nsid=779987488;c=1732148414147) 2024-11-21T00:20:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14f9bb50ad28b40f with lease ID 0xe6e65ce657aeb1c6: from storage DS-98472a23-70a4-484d-9047-16e1f0420ee3 node DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a76a2ea5-fea7-45d7-9483-ecc9530ddbb1, infoPort=33805, infoSecurePort=0, ipcPort=37333, storageInfo=lv=-57;cid=testClusterID;nsid=779987488;c=1732148414147), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14f9bb50ad28b40f with lease ID 0xe6e65ce657aeb1c6: Processing first storage report for DS-36a1682e-3bf4-4048-a3c5-ed65a4387a2b from datanode DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a76a2ea5-fea7-45d7-9483-ecc9530ddbb1, infoPort=33805, infoSecurePort=0, ipcPort=37333, storageInfo=lv=-57;cid=testClusterID;nsid=779987488;c=1732148414147) 2024-11-21T00:20:15,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14f9bb50ad28b40f with lease ID 0xe6e65ce657aeb1c6: from storage DS-36a1682e-3bf4-4048-a3c5-ed65a4387a2b node DatanodeRegistration(127.0.0.1:42405, datanodeUuid=a76a2ea5-fea7-45d7-9483-ecc9530ddbb1, infoPort=33805, infoSecurePort=0, ipcPort=37333, storageInfo=lv=-57;cid=testClusterID;nsid=779987488;c=1732148414147), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:15,741 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740 
2024-11-21T00:20:15,741 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:15,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:15,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:20:16,151 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0 with version=8 2024-11-21T00:20:16,151 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/hbase-staging 2024-11-21T00:20:16,153 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:16,153 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:16,153 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:16,153 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:16,153 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:16,153 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:16,153 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:20:16,153 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:16,154 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33815 2024-11-21T00:20:16,155 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33815 connecting to ZooKeeper ensemble=127.0.0.1:62031 2024-11-21T00:20:16,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:338150x0, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:16,283 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33815-0x1015ac3425c0000 connected 2024-11-21T00:20:16,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 
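Annotation: the RpcExecutor lines above report bounded FIFO call queues drained by a fixed number of handler threads (numCallQueues=1, maxQueueLength=30, handlerCount=3). The following plain java.util.concurrent sketch illustrates that dispatch pattern only; it is not HBase's RpcExecutor code, and the Call type and names are invented for the example.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    public class FifoCallQueueSketch {
      record Call(String description) {}

      public static void main(String[] args) {
        BlockingQueue<Call> callQueue = new ArrayBlockingQueue<>(30); // maxQueueLength=30
        int handlerCount = 3;
        for (int i = 0; i < handlerCount; i++) {
          Thread handler = new Thread(() -> {
            try {
              while (true) {
                Call call = callQueue.take();   // FIFO: oldest queued call is handled first
                System.out.println("handling " + call.description());
              }
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
            }
          }, "handler-" + i);
          handler.setDaemon(true);
          handler.start();
        }
        // Producers offer calls; offer() returns false once the bounded queue is full,
        // which is the purpose of a maxQueueLength.
        callQueue.offer(new Call("example request"));
      }
    }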
2024-11-21T00:20:16,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:20:16,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:20:16,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_test 2024-11-21T00:20:16,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_replication 2024-11-21T00:20:16,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:16,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:20:16,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:16,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:16,364 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:16,364 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0, hbase.cluster.distributed=false 2024-11-21T00:20:16,366 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/acl 2024-11-21T00:20:16,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33815 2024-11-21T00:20:16,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33815 2024-11-21T00:20:16,366 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33815 2024-11-21T00:20:16,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33815 2024-11-21T00:20:16,367 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33815 2024-11-21T00:20:16,380 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:16,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=3 2024-11-21T00:20:16,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:16,381 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:16,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:16,381 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:16,381 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:20:16,381 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:16,382 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41953 2024-11-21T00:20:16,384 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41953 connecting to ZooKeeper ensemble=127.0.0.1:62031 2024-11-21T00:20:16,385 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:16,387 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:16,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419530x0, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:16,423 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419530x0, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:16,423 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:20:16,424 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41953-0x1015ac3425c0001 connected 2024-11-21T00:20:16,424 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:20:16,425 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/master 2024-11-21T00:20:16,426 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/acl 2024-11-21T00:20:16,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41953 2024-11-21T00:20:16,427 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41953 2024-11-21T00:20:16,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41953 2024-11-21T00:20:16,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41953 2024-11-21T00:20:16,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41953 2024-11-21T00:20:16,439 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:33815 2024-11-21T00:20:16,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0-586781601/backup-masters/5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:16,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:16,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:16,453 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on existing znode=/0-586781601/backup-masters/5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:16,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:16,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-586781601/master 2024-11-21T00:20:16,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:16,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on existing znode=/0-586781601/master 2024-11-21T00:20:16,464 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0-586781601/backup-masters/5ed4808ef0e6,33815,1732148416153 from backup master directory 2024-11-21T00:20:16,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/backup-masters/5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:16,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:16,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:16,474 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:20:16,474 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:16,478 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/hbase.id] with ID: 212ecdbe-1c03-48c4-b613-45677eb0d5d7 2024-11-21T00:20:16,478 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/.tmp/hbase.id 2024-11-21T00:20:16,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:20:16,883 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/.tmp/hbase.id]:[hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/hbase.id] 2024-11-21T00:20:16,896 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:16,896 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:20:16,898 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
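Annotation: the FSUtils lines above create the cluster ID file by writing hbase.id under .tmp and then moving it to its final location. The sketch below shows that write-to-temp-then-rename pattern with the standard Hadoop FileSystem API; the paths are illustrative and this is not the actual FSUtils implementation.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = new Path("/user/jenkins/test-data/example-root"); // illustrative path
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");

        String clusterId = UUID.randomUUID().toString();
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(clusterId.getBytes(StandardCharsets.UTF_8));
        }
        // The rename is what publishes the ID at its final location in one step.
        if (!fs.rename(tmp, target)) {
          throw new IOException("Failed to move " + tmp + " to " + target);
        }
      }
    }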
2024-11-21T00:20:16,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:16,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:16,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:20:16,929 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:16,930 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:20:16,931 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:16,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:20:17,337 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store 2024-11-21T00:20:17,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:20:17,747 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:17,748 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:17,748 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:17,748 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:17,748 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:17,748 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:17,748 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
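Annotation: the MasterRegion lines above print the full descriptor of the local 'master:store' table. As a sketch, the same settings for its 'info' family (VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) could be expressed with the public HBase client builders; the builder method names are assumed from the HBase 2+/3 client API, and the real master:store descriptor is built internally, not through client code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .build();
      }
    }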
2024-11-21T00:20:17,748 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148417748Disabling compacts and flushes for region at 1732148417748Disabling writes for close at 1732148417748Writing region close event to WAL at 1732148417748Closed at 1732148417748 2024-11-21T00:20:17,749 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/.initializing 2024-11-21T00:20:17,749 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:17,750 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:17,752 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C33815%2C1732148416153, suffix=, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/oldWALs, maxLogs=10 2024-11-21T00:20:17,771 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752, exclude list is [], retry=0 2024-11-21T00:20:17,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-18-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:17,789 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 2024-11-21T00:20:17,792 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:17,792 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:17,793 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:17,793 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,793 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,800 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:20:17,806 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:17,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:17,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:20:17,809 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:17,809 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:17,810 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:20:17,811 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:17,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:17,813 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:20:17,814 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:17,815 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:17,815 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,816 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,816 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,817 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,817 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,820 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:20:17,824 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:17,830 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:17,830 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63627327, jitterRate=-0.05187894403934479}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:17,831 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148417793Initializing all the Stores at 1732148417794 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148417795 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148417800 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148417800Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY 
=> 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148417800Cleaning up temporary data from old regions at 1732148417817 (+17 ms)Region opened successfully at 1732148417831 (+14 ms) 2024-11-21T00:20:17,832 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:20:17,836 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29da77dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:17,837 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:20:17,838 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:20:17,838 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:20:17,838 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:20:17,839 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:20:17,839 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:20:17,839 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:20:17,844 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
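Annotation: the HRegion(1114) "Opened ..." line above reports ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63627327, jitterRate=-0.05187894403934479}. Assuming the usual behavior of jittering a base max file size by that rate, and assuming a 64 MB base for this store (inferred, not stated in the log), the arithmetic reproduces the logged value to within rounding:

    // Illustration only; the exact formula inside ConstantSizeRegionSplitPolicy may differ.
    public class SplitSizeJitterSketch {
      public static void main(String[] args) {
        long baseMaxFileSize = 64L * 1024 * 1024;   // 67108864 bytes (assumed base size)
        double jitterRate = -0.05187894403934479;   // value printed in the log
        long desired = baseMaxFileSize + (long) (baseMaxFileSize * jitterRate);
        System.out.println(desired);                // ~63627328, within a byte or two of the logged 63627327
      }
    }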
2024-11-21T00:20:17,845 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/balancer because node does not exist (not necessarily an error) 2024-11-21T00:20:17,930 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/balancer already deleted, retry=false 2024-11-21T00:20:17,930 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:20:17,931 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:20:17,940 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/normalizer already deleted, retry=false 2024-11-21T00:20:17,941 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:20:17,942 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:20:17,951 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/switch/split already deleted, retry=false 2024-11-21T00:20:17,952 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:20:17,963 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/switch/merge already deleted, retry=false 2024-11-21T00:20:17,966 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:20:17,972 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/snapshot-cleanup already deleted, retry=false 2024-11-21T00:20:17,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:17,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:17,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:17,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:17,984 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,33815,1732148416153, sessionid=0x1015ac3425c0000, setting cluster-up flag (Was=false) 2024-11-21T00:20:18,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:18,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:18,045 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-586781601/flush-table-proc/acquired, /0-586781601/flush-table-proc/reached, /0-586781601/flush-table-proc/abort 2024-11-21T00:20:18,055 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:18,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:18,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:18,137 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-586781601/online-snapshot/acquired, /0-586781601/online-snapshot/reached, /0-586781601/online-snapshot/abort 2024-11-21T00:20:18,140 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:18,141 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:20:18,143 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:18,143 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:20:18,143 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of 
multiplier of cost functions = 0.0 etc. 2024-11-21T00:20:18,144 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,33815,1732148416153 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:18,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,146 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:18,146 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:20:18,149 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:18,149 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:20:18,165 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148448165 2024-11-21T00:20:18,165 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:20:18,166 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:20:18,166 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:20:18,166 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:20:18,166 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:20:18,166 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:20:18,168 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
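Annotation: the StochasticLoadBalancer line above lists its cost functions and reports "sum of multiplier of cost functions = 0.0" for this test configuration. As a simplified sketch (not the balancer's actual code), the overall cost can be read as a normalized weighted sum of the per-function costs, which explains why a zero sum of multipliers makes the cost functions contribute nothing:

    public class WeightedCostSketch {
      // total = sum(w_i * c_i) / sum(w_i), with a guard for the all-zero-multiplier case
      static double totalCost(double[] multipliers, double[] costs) {
        double weighted = 0.0, sumOfMultipliers = 0.0;
        for (int i = 0; i < multipliers.length; i++) {
          weighted += multipliers[i] * costs[i];
          sumOfMultipliers += multipliers[i];
        }
        return sumOfMultipliers == 0.0 ? 0.0 : weighted / sumOfMultipliers;
      }

      public static void main(String[] args) {
        // With every multiplier at 0.0, as logged, the total cost degenerates to 0.
        System.out.println(totalCost(new double[] {0.0, 0.0}, new double[] {0.3, 0.9}));
      }
    }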
2024-11-21T00:20:18,171 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:20:18,172 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:20:18,172 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:20:18,172 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:20:18,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:20:18,176 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:20:18,176 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:20:18,180 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148418176,5,FailOnTimeoutGroup] 2024-11-21T00:20:18,184 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148418180,5,FailOnTimeoutGroup] 2024-11-21T00:20:18,184 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,184 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:20:18,184 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,184 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
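Annotation: the ChoreService lines above schedule periodic chores such as LogsCleaner (period=600000 ms) and HFileCleaner. The plain-JDK sketch below shows only the fixed-period scheduling pattern behind those lines; HBase's ScheduledChore and ChoreService layer cancellation and missed-run accounting on top of this idea.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
        Runnable logsCleaner = () -> System.out.println("cleaning old WALs...");
        // period=600000, unit=MILLISECONDS, matching the logged LogsCleaner schedule
        pool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(1);   // let the first run fire, then shut down
        pool.shutdownNow();
      }
    }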
2024-11-21T00:20:18,231 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(746): ClusterId : 212ecdbe-1c03-48c4-b613-45677eb0d5d7 2024-11-21T00:20:18,231 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:20:18,243 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:20:18,243 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:20:18,253 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:20:18,254 DEBUG [RS:0;5ed4808ef0e6:41953 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4424a6c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:18,269 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:41953 2024-11-21T00:20:18,269 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:20:18,269 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:20:18,269 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:20:18,270 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,33815,1732148416153 with port=41953, startcode=1732148416380 2024-11-21T00:20:18,270 DEBUG [RS:0;5ed4808ef0e6:41953 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:20:18,272 INFO [HMaster-EventLoopGroup-17-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38021, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:20:18,273 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33815 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:18,273 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33815 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:18,275 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0 2024-11-21T00:20:18,275 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34383 2024-11-21T00:20:18,275 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:20:18,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/rs 2024-11-21T00:20:18,288 DEBUG [RS:0;5ed4808ef0e6:41953 {}] zookeeper.ZKUtil(111): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher 
on existing znode=/0-586781601/rs/5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:18,288 WARN [RS:0;5ed4808ef0e6:41953 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:20:18,288 INFO [RS:0;5ed4808ef0e6:41953 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:18,288 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:18,288 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,41953,1732148416380] 2024-11-21T00:20:18,292 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:20:18,294 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:20:18,294 INFO [RS:0;5ed4808ef0e6:41953 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:20:18,294 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,294 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:20:18,296 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:20:18,296 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
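The entries above show the region server choosing AsyncFSWALProvider for its WAL, capping the global memstore at 880 M with an 836 M low-water mark, and bounding compaction throughput between 50 MB/s and 100 MB/s. A minimal sketch of the configuration knobs such values are conventionally derived from follows; the key names are assumptions based on standard hbase-site settings and are not confirmed by this log, so verify them against the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalAndFlushConfigSketch {
      public static Configuration sketch() {
        // Start from hbase-default.xml / hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();

        // WAL implementation; "asyncfs" selects AsyncFSWALProvider (assumed key name).
        conf.set("hbase.wal.provider", "asyncfs");

        // Fraction of heap shared by all memstores, plus the low-water mark at which
        // forced flushing stops; 836 M is 0.95 of the 880 M limit reported above.
        conf.setDouble("hbase.regionserver.global.memstore.size", 0.4);
        conf.setDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95);

        // Pressure-aware compaction throughput bounds in bytes/sec, matching the
        // 100 MB/s upper and 50 MB/s lower bounds reported above (assumed key names).
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }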
2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:18,297 DEBUG [RS:0;5ed4808ef0e6:41953 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:18,308 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,308 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,308 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,308 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
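Every executor started above uses corePoolSize equal to maxPoolSize, i.e. a fixed-size thread pool per event type: one thread for most region open/close work, two for log replay, three each for snapshot and flush operations. The fragment below is a plain java.util.concurrent illustration of what those two parameters mean; it is not HBase's own executor.ExecutorService wrapper, just the JDK primitive the numbers map onto.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class FixedPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        // corePoolSize == maxPoolSize == 3: the pool never grows or shrinks under load,
        // mirroring the RS_SNAPSHOT_OPERATIONS / RS_FLUSH_OPERATIONS settings above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

        for (int i = 0; i < 6; i++) {
          final int task = i;
          pool.execute(() -> System.out.println(
              Thread.currentThread().getName() + " handling task " + task));
        }

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }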
2024-11-21T00:20:18,308 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,308 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41953,1732148416380-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:18,313 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:18,328 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:20:18,328 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41953,1732148416380-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,329 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,329 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.Replication(171): 5ed4808ef0e6,41953,1732148416380 started 2024-11-21T00:20:18,348 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:18,348 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,41953,1732148416380, RpcServer on 5ed4808ef0e6/172.17.0.2:41953, sessionid=0x1015ac3425c0001 2024-11-21T00:20:18,348 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:20:18,348 DEBUG [RS:0;5ed4808ef0e6:41953 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:18,349 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,41953,1732148416380' 2024-11-21T00:20:18,349 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-586781601/flush-table-proc/abort' 2024-11-21T00:20:18,349 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-586781601/flush-table-proc/acquired' 2024-11-21T00:20:18,350 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:20:18,350 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:20:18,350 DEBUG [RS:0;5ed4808ef0e6:41953 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:18,350 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,41953,1732148416380' 2024-11-21T00:20:18,350 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-586781601/online-snapshot/abort' 2024-11-21T00:20:18,352 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-586781601/online-snapshot/acquired' 2024-11-21T00:20:18,353 DEBUG [RS:0;5ed4808ef0e6:41953 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:20:18,353 INFO 
[RS:0;5ed4808ef0e6:41953 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:20:18,353 INFO [RS:0;5ed4808ef0e6:41953 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:20:18,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:18,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:18,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:18,357 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:18,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:18,454 INFO [RS:0;5ed4808ef0e6:41953 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:18,456 INFO [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C41953%2C1732148416380, suffix=, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs, maxLogs=10 2024-11-21T00:20:18,473 DEBUG [RS:0;5ed4808ef0e6:41953 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456, exclude list is [], retry=0 2024-11-21T00:20:18,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-18-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:18,481 INFO [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456 2024-11-21T00:20:18,481 DEBUG [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:18,574 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:20:18,575 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0 2024-11-21T00:20:18,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:20:18,980 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:18,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:18,983 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:18,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:18,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:18,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:18,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:18,984 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:18,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:18,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:18,986 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:18,986 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:18,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:18,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:18,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:18,987 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:18,988 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:18,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:18,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740 2024-11-21T00:20:18,989 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740 2024-11-21T00:20:18,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:18,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:18,991 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
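The descriptor dumped above gives hbase:meta four column families (info, ns, rep_barrier, table), each with ROW_INDEX_V1 data block encoding, a ROWCOL bloom filter, in-memory placement, no compression and an 8 KB (or, for rep_barrier, 64 KB) block size. hbase:meta itself is created internally by InitMetaProcedure, but a user table with the same family attributes could be declared through the public client API roughly as sketched below; the table name and the supplied Connection are hypothetical.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeTableSketch {
      // A column family mirroring the 'info' family attributes logged above.
      static ColumnFamilyDescriptor infoLikeFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setKeepDeletedCells(KeepDeletedCells.FALSE)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setTimeToLive(HConstants.FOREVER)   // TTL => 'FOREVER'
            .setMinVersions(0)
            .setScope(0)                         // REPLICATION_SCOPE => '0'
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setCompressionType(Compression.Algorithm.NONE)
            .setBlockCacheEnabled(true)
            .setBlocksize(8192)                  // BLOCKSIZE => '8192 B (8KB)'
            .build();
      }

      // 'connection' is assumed to be an already-open client Connection.
      static void createTable(Connection connection) throws Exception {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_meta_like"))  // hypothetical name
            .setColumnFamily(infoLikeFamily())
            .build();
        try (Admin admin = connection.getAdmin()) {
          admin.createTable(td);
        }
      }
    }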
2024-11-21T00:20:18,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:18,994 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:18,995 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69743297, jitterRate=0.03925611078739166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:18,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148418980Initializing all the Stores at 1732148418981 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148418981Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148418981Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148418981Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148418981Cleaning up temporary data from old regions at 1732148418991 (+10 ms)Region opened successfully at 1732148418995 (+4 ms) 2024-11-21T00:20:18,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:18,995 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:18,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:18,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:18,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:18,996 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:18,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148418995Disabling compacts and flushes for region at 1732148418995Disabling writes for close at 1732148418995Writing region 
close event to WAL at 1732148418996 (+1 ms)Closed at 1732148418996 2024-11-21T00:20:18,997 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:18,997 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:20:18,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:20:19,000 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:19,002 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:20:19,152 DEBUG [5ed4808ef0e6:33815 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:20:19,152 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:19,153 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,41953,1732148416380, state=OPENING 2024-11-21T00:20:19,182 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:20:19,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:19,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:19,193 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:19,193 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:19,193 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:19,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,41953,1732148416380}] 2024-11-21T00:20:19,346 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:20:19,348 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-18-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:37141, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:20:19,351 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:20:19,351 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:19,351 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:20:19,353 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C41953%2C1732148416380.meta, suffix=.meta, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs, maxLogs=10 2024-11-21T00:20:19,369 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.meta.1732148419353.meta, exclude list is [], retry=0 2024-11-21T00:20:19,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-18-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:19,373 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.meta.1732148419353.meta 2024-11-21T00:20:19,373 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:19,374 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:19,374 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
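The open path above loads ReplicationObserver as a system coprocessor, and the meta descriptor carries org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint as a table coprocessor (the pipe-delimited coprocessor$1 attribute seen earlier, priority 536870911). For a user table, a table coprocessor is declared on the descriptor; a minimal sketch with the public builder API follows, where the table and family names are illustrative and setCoprocessor(String) registers the class at its default user priority rather than the system priority shown in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CoprocessorDeclarationSketch {
      public static TableDescriptor withEndpoint() throws Exception {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table")) // hypothetical
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f")))
            // Same endpoint class the meta descriptor above references.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }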
2024-11-21T00:20:19,374 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:19,374 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:20:19,374 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:20:19,374 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:20:19,374 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:19,374 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:20:19,375 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:20:19,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:19,377 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:19,377 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:19,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:19,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:19,378 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:19,378 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:19,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:19,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:19,379 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:19,379 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:19,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:19,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:19,380 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:19,380 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:19,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:19,380 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:19,381 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740 2024-11-21T00:20:19,382 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740 2024-11-21T00:20:19,383 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:19,383 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:19,383 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
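The CompactionConfiguration line repeats once per column family; the knobs that matter are the file-count limits (minFilesToCompact:3, maxFilesToCompact:10), the selection ratios (1.2 peak, 5.0 off-peak) and the weekly major-compaction period with 0.5 jitter. A minimal sketch of the configuration keys those values are conventionally read from follows; the key names are taken from standard HBase tuning documentation and should be treated as assumptions, since the log only prints the resolved values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum / maximum number of store files per minor compaction
        // (minFilesToCompact:3, maxFilesToCompact:10 above).
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Files below this size are always compaction candidates (minCompactSize:128 MB above).
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // Selection ratios for peak and off-peak hours (ratio 1.2 / off-peak 5.0 above).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Major compaction interval and jitter (604800000 ms = 7 days, jitter 0.5 above).
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }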
2024-11-21T00:20:19,384 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:19,385 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66481076, jitterRate=-0.009354770183563232}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:19,385 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:20:19,386 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148419375Writing region info on filesystem at 1732148419375Initializing all the Stores at 1732148419376 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148419376Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148419376Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148419376Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148419376Cleaning up temporary data from old regions at 1732148419383 (+7 ms)Running coprocessor post-open hooks at 1732148419385 (+2 ms)Region opened successfully at 1732148419386 (+1 ms) 2024-11-21T00:20:19,387 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148419346 2024-11-21T00:20:19,389 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:20:19,389 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:20:19,390 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:19,391 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,41953,1732148416380, state=OPEN 2024-11-21T00:20:19,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:19,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:19,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:19,499 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:19,499 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:19,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:20:19,503 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,41953,1732148416380 in 307 msec 2024-11-21T00:20:19,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:20:19,504 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 506 msec 2024-11-21T00:20:19,505 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:19,505 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:20:19,506 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:19,506 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,41953,1732148416380, seqNum=-1] 2024-11-21T00:20:19,506 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:19,507 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-18-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45015, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:19,513 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3690 sec 2024-11-21T00:20:19,513 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148419513, completionTime=-1 
2024-11-21T00:20:19,513 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:20:19,513 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:20:19,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:20:19,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148479515 2024-11-21T00:20:19,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148539515 2024-11-21T00:20:19,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:20:19,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33815,1732148416153-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:19,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33815,1732148416153-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:19,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33815,1732148416153-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:19,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:33815, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:19,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:19,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:19,518 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.046sec 2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33815,1732148416153-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:19,520 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33815,1732148416153-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:20:19,524 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:20:19,524 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:20:19,524 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33815,1732148416153-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:19,540 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34e06858, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:19,540 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,33815,-1 for getting cluster id 2024-11-21T00:20:19,541 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:19,542 DEBUG [HMaster-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '212ecdbe-1c03-48c4-b613-45677eb0d5d7' 2024-11-21T00:20:19,542 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:19,542 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "212ecdbe-1c03-48c4-b613-45677eb0d5d7" 2024-11-21T00:20:19,543 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34f7de2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:19,543 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,33815,-1] 2024-11-21T00:20:19,543 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:19,543 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:19,544 INFO [HMaster-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34994, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:19,545 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4653e31a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:19,545 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:19,546 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,41953,1732148416380, seqNum=-1] 2024-11-21T00:20:19,547 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:19,548 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-18-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58424, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:19,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:19,551 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:62031 2024-11-21T00:20:19,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:19,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015ac3425c0002 connected 2024-11-21T00:20:19,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.log.dir so I do NOT create it in target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62 2024-11-21T00:20:19,739 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:20:19,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.tmp.dir so I do NOT create it in target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62 2024-11-21T00:20:19,739 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.tmp.dir Erasing configuration value by system value. 
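At this point the first minicluster is fully up (activeMaster=5ed4808ef0e6,33815,1732148416153) and HBaseTestingUtil immediately begins provisioning a second one with the StartMiniClusterOption block logged above (one master, one region server, one datanode, one ZooKeeper server). A minimal sketch of how a test typically drives that API follows, assuming the hbase-testing-util classes named in the log; the table used for the smoke check is hypothetical.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();

        // Mirrors the option block logged above.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();

        util.startMiniCluster(option);
        try (Table table = util.createTable(TableName.valueOf("smoke_test"), Bytes.toBytes("f"))) {
          System.out.println("created " + table.getName());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }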
2024-11-21T00:20:19,739 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62 2024-11-21T00:20:19,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:20:19,739 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/cluster_717caa8b-a902-095b-3387-8e7184815b96, deleteOnExit=true 2024-11-21T00:20:19,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:20:19,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/test.cache.data in system properties and HBase conf 2024-11-21T00:20:19,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:20:19,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:20:19,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:20:19,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:20:19,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:20:19,740 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:20:19,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:20:19,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:19,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:20:19,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:20:20,082 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:20,088 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:20,090 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:20,090 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:20,090 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:20:20,090 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:20,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c2610f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:20,091 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b8c3687{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:20,219 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7fd11360{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/java.io.tmpdir/jetty-localhost-39469-hadoop-hdfs-3_4_1-tests_jar-_-any-5719525499208149850/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:20,220 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ca7e3da{HTTP/1.1, (http/1.1)}{localhost:39469} 2024-11-21T00:20:20,220 INFO [Time-limited test {}] server.Server(415): Started @154770ms 2024-11-21T00:20:20,592 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:20,595 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:20,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:20,599 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:20,599 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:20:20,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a6ccddd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:20,600 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8bcaca2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:20,710 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6ed81ed8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/java.io.tmpdir/jetty-localhost-46275-hadoop-hdfs-3_4_1-tests_jar-_-any-3951329648134632238/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:20,710 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@76ed34c3{HTTP/1.1, (http/1.1)}{localhost:46275} 2024-11-21T00:20:20,710 INFO [Time-limited test {}] server.Server(415): Started @155260ms 2024-11-21T00:20:20,711 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:20:21,715 WARN [Thread-1413 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/cluster_717caa8b-a902-095b-3387-8e7184815b96/data/data2/current/BP-415794961-172.17.0.2-1732148419765/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:21,715 WARN [Thread-1412 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/cluster_717caa8b-a902-095b-3387-8e7184815b96/data/data1/current/BP-415794961-172.17.0.2-1732148419765/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:21,735 WARN [Thread-1400 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:20:21,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd63464e315dcf407 with lease ID 0x6cb786ff4d48b475: Processing first storage report for DS-4f11d27d-6609-4a0b-939d-c6b8f8795e65 from datanode DatanodeRegistration(127.0.0.1:44331, datanodeUuid=7b4fdc52-703c-45d9-8eb2-e29b54908140, infoPort=37077, infoSecurePort=0, ipcPort=41153, storageInfo=lv=-57;cid=testClusterID;nsid=1565451986;c=1732148419765) 2024-11-21T00:20:21,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd63464e315dcf407 with lease ID 0x6cb786ff4d48b475: from storage DS-4f11d27d-6609-4a0b-939d-c6b8f8795e65 node DatanodeRegistration(127.0.0.1:44331, datanodeUuid=7b4fdc52-703c-45d9-8eb2-e29b54908140, infoPort=37077, infoSecurePort=0, ipcPort=41153, storageInfo=lv=-57;cid=testClusterID;nsid=1565451986;c=1732148419765), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:21,738 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd63464e315dcf407 with lease ID 0x6cb786ff4d48b475: Processing first storage report for DS-4e0528e4-60eb-4c05-bcd9-f904a07b6033 from datanode DatanodeRegistration(127.0.0.1:44331, datanodeUuid=7b4fdc52-703c-45d9-8eb2-e29b54908140, infoPort=37077, infoSecurePort=0, ipcPort=41153, storageInfo=lv=-57;cid=testClusterID;nsid=1565451986;c=1732148419765) 2024-11-21T00:20:21,738 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd63464e315dcf407 with lease ID 0x6cb786ff4d48b475: from storage DS-4e0528e4-60eb-4c05-bcd9-f904a07b6033 node DatanodeRegistration(127.0.0.1:44331, datanodeUuid=7b4fdc52-703c-45d9-8eb2-e29b54908140, infoPort=37077, infoSecurePort=0, ipcPort=41153, storageInfo=lv=-57;cid=testClusterID;nsid=1565451986;c=1732148419765), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:21,743 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62 
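The DirectoryScanner warning above is driven by a single HDFS property; a hedged sketch of setting it within the accepted 1..1000 ms/sec range follows (the value 500 is an arbitrary illustrative choice, applied to whatever Configuration the mini DFS cluster is started with):

import org.apache.hadoop.conf.Configuration;

public class DirScannerThrottleSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Values above 1000 trigger the warning logged above and the scanner
    // falls back to its default; an in-range value keeps the throttle active.
    conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 500);
    System.out.println(conf.getInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", -1));
  }
}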
2024-11-21T00:20:21,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:21,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:20:21,755 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2 with version=8 2024-11-21T00:20:21,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/hbase-staging 2024-11-21T00:20:21,758 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:21,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:21,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:21,758 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:21,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:21,758 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:21,758 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:20:21,758 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:21,759 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37261 2024-11-21T00:20:21,760 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37261 connecting to ZooKeeper ensemble=127.0.0.1:62031 2024-11-21T00:20:21,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:372610x0, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:21,782 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37261-0x1015ac3425c0003 connected 2024-11-21T00:20:21,856 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
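The RpcExecutor lines above (handlerCount, numCallQueues, read/write queue splits) are derived from a few standard HBase settings; a sketch of the usual knobs, with the caveat that the exact values in this run come from the test's own configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcExecutorConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);               // total RPC handlers per server
    conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f); // call queues as a fraction of handlers
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);     // carve separate read vs write queues
    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", 30));
  }
}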
2024-11-21T00:20:21,857 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:21,859 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on znode that does not yet exist, /1-1330347467/running 2024-11-21T00:20:21,859 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2, hbase.cluster.distributed=false 2024-11-21T00:20:21,861 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on znode that does not yet exist, /1-1330347467/acl 2024-11-21T00:20:21,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37261 2024-11-21T00:20:21,861 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37261 2024-11-21T00:20:21,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37261 2024-11-21T00:20:21,862 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37261 2024-11-21T00:20:21,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37261 2024-11-21T00:20:21,879 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:21,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:21,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:21,879 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:21,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:21,879 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:21,879 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:20:21,879 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:21,881 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46081 2024-11-21T00:20:21,882 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:46081 connecting to ZooKeeper ensemble=127.0.0.1:62031 2024-11-21T00:20:21,883 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:21,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:21,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460810x0, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:21,898 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:460810x0, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on znode that does not yet exist, /1-1330347467/running 2024-11-21T00:20:21,898 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46081-0x1015ac3425c0004 connected 2024-11-21T00:20:21,899 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:20:21,901 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:20:21,902 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on znode that does not yet exist, /1-1330347467/master 2024-11-21T00:20:21,903 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on znode that does not yet exist, /1-1330347467/acl 2024-11-21T00:20:21,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46081 2024-11-21T00:20:21,908 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46081 2024-11-21T00:20:21,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46081 2024-11-21T00:20:21,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46081 2024-11-21T00:20:21,912 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46081 2024-11-21T00:20:21,930 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:37261 2024-11-21T00:20:21,931 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-1330347467/backup-masters/5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:21,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467/backup-masters 2024-11-21T00:20:21,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/1-1330347467/backup-masters 2024-11-21T00:20:21,940 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on existing znode=/1-1330347467/backup-masters/5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:21,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1330347467/master 2024-11-21T00:20:21,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:21,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:21,951 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on existing znode=/1-1330347467/master 2024-11-21T00:20:21,952 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-1330347467/backup-masters/5ed4808ef0e6,37261,1732148421757 from backup master directory 2024-11-21T00:20:21,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1330347467/backup-masters/5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:21,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467/backup-masters 2024-11-21T00:20:21,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467/backup-masters 2024-11-21T00:20:21,961 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
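The watcher events above revolve around the /master and /backup-masters znodes under this run's randomized base znode /1-1330347467; a plain ZooKeeper-client sketch of inspecting those paths (quorum and base znode copied from this log; a normal deployment would use its own quorum and the default /hbase parent):

import org.apache.zookeeper.ZooKeeper;

public class MasterZnodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode as logged in this run; both are test-specific.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62031", 30000, event -> { });
    String base = "/1-1330347467";
    byte[] master = zk.getData(base + "/master", false, null);   // protobuf-serialized ServerName
    System.out.println("active master znode holds " + master.length + " bytes");
    System.out.println("backup masters: " + zk.getChildren(base + "/backup-masters", false));
    zk.close();
  }
}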
2024-11-21T00:20:21,961 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:21,965 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/hbase.id] with ID: 81c523d2-6ef8-4bc8-b27d-9a8b671a4708 2024-11-21T00:20:21,965 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/.tmp/hbase.id 2024-11-21T00:20:21,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:20:22,374 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/.tmp/hbase.id]:[hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/hbase.id] 2024-11-21T00:20:22,386 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:22,386 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:20:22,387 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:20:22,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:22,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:22,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:20:22,816 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:22,816 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:20:22,817 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:22,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:20:23,223 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store 2024-11-21T00:20:23,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:20:23,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:23,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:23,630 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
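The master:store descriptor printed above (info/proc/rs/state families, ROW_INDEX_V1 encoding and an 8 KB block size on info) maps onto the public descriptor builders; a sketch of declaring an equivalent 'info' family for an ordinary table, not the master's internal bootstrap path (the table name "demo" is hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                  // VERSIONS => '3'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)                             // BLOCKSIZE => '8192 B (8KB)'
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))              // hypothetical table name
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}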
2024-11-21T00:20:23,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:23,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:23,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:23,630 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:23,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148423630Disabling compacts and flushes for region at 1732148423630Disabling writes for close at 1732148423630Writing region close event to WAL at 1732148423630Closed at 1732148423630 2024-11-21T00:20:23,631 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/.initializing 2024-11-21T00:20:23,631 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/WALs/5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:23,632 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:23,634 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C37261%2C1732148421757, suffix=, logDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/WALs/5ed4808ef0e6,37261,1732148421757, archiveDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/oldWALs, maxLogs=10 2024-11-21T00:20:23,646 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/WALs/5ed4808ef0e6,37261,1732148421757/5ed4808ef0e6%2C37261%2C1732148421757.1732148423634, exclude list is [], retry=0 2024-11-21T00:20:23,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44331,DS-4f11d27d-6609-4a0b-939d-c6b8f8795e65,DISK] 2024-11-21T00:20:23,651 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/WALs/5ed4808ef0e6,37261,1732148421757/5ed4808ef0e6%2C37261%2C1732148421757.1732148423634 2024-11-21T00:20:23,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37077:37077)] 2024-11-21T00:20:23,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:23,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:23,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,655 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:20:23,658 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:23,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:23,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 
columnFamilyName proc 2024-11-21T00:20:23,659 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:23,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:23,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,660 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:20:23,660 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:23,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:23,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:20:23,662 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:23,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:23,662 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,663 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,663 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,664 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,664 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,665 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:20:23,666 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:23,668 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:23,668 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62059623, jitterRate=-0.07523955404758453}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:23,668 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148423652Initializing all the Stores at 1732148423653 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148423653Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1732148423654 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148423654Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148423654Cleaning up temporary data from old regions at 1732148423664 (+10 ms)Region opened successfully at 1732148423668 (+4 ms) 2024-11-21T00:20:23,669 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:20:23,672 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c5ac39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:23,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:20:23,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:20:23,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:20:23,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:20:23,674 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:20:23,674 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:20:23,674 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:20:23,676 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
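The CompactionConfiguration lines repeated above for each column family (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0) correspond to the stock compaction settings; a sketch of the matching properties, set here to the same values the log reports:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // file-selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    System.out.println("ratio=" + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}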
2024-11-21T00:20:23,676 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Unable to get data of znode /1-1330347467/balancer because node does not exist (not necessarily an error) 2024-11-21T00:20:23,792 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1330347467/balancer already deleted, retry=false 2024-11-21T00:20:23,793 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:20:23,793 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Unable to get data of znode /1-1330347467/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:20:23,803 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1330347467/normalizer already deleted, retry=false 2024-11-21T00:20:23,803 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:20:23,804 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Unable to get data of znode /1-1330347467/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:20:23,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1330347467/switch/split already deleted, retry=false 2024-11-21T00:20:23,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Unable to get data of znode /1-1330347467/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:20:23,824 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1330347467/switch/merge already deleted, retry=false 2024-11-21T00:20:23,826 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Unable to get data of znode /1-1330347467/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:20:23,834 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1330347467/snapshot-cleanup already deleted, retry=false 2024-11-21T00:20:23,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1330347467/running 2024-11-21T00:20:23,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1330347467/running 2024-11-21T00:20:23,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:23,845 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:23,845 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,37261,1732148421757, sessionid=0x1015ac3425c0003, setting cluster-up flag (Was=false) 2024-11-21T00:20:23,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:23,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:23,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1330347467/flush-table-proc/acquired, /1-1330347467/flush-table-proc/reached, /1-1330347467/flush-table-proc/abort 2024-11-21T00:20:23,899 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:24,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:24,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:24,182 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1330347467/online-snapshot/acquired, /1-1330347467/online-snapshot/reached, /1-1330347467/online-snapshot/abort 2024-11-21T00:20:24,183 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:24,184 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:20:24,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:24,186 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:20:24,186 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, 
MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:20:24,194 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,37261,1732148421757 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:24,196 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,198 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:24,199 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:20:24,199 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148454199 2024-11-21T00:20:24,199 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:20:24,199 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:20:24,199 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:20:24,199 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:20:24,199 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:20:24,199 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:20:24,200 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:24,200 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,200 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:20:24,200 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:20:24,201 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:20:24,201 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:20:24,201 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:20:24,205 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:20:24,205 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:20:24,214 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large 
file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148424205,5,FailOnTimeoutGroup] 2024-11-21T00:20:24,214 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148424214,5,FailOnTimeoutGroup] 2024-11-21T00:20:24,214 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,214 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:20:24,214 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,214 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:20:24,217 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(746): ClusterId : 81c523d2-6ef8-4bc8-b27d-9a8b671a4708 2024-11-21T00:20:24,217 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:20:24,224 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:20:24,225 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:20:24,235 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:20:24,235 DEBUG [RS:0;5ed4808ef0e6:46081 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d2eed7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:24,251 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:46081 2024-11-21T00:20:24,251 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:20:24,251 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:20:24,251 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:20:24,252 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,37261,1732148421757 with port=46081, startcode=1732148421879 2024-11-21T00:20:24,252 DEBUG [RS:0;5ed4808ef0e6:46081 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:20:24,254 INFO [HMaster-EventLoopGroup-19-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38293, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.8 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:20:24,254 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37261 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:24,255 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37261 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:24,256 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2 2024-11-21T00:20:24,256 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46557 2024-11-21T00:20:24,256 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:20:24,292 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:20:24,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467/rs 2024-11-21T00:20:24,317 DEBUG [RS:0;5ed4808ef0e6:46081 {}] zookeeper.ZKUtil(111): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on existing znode=/1-1330347467/rs/5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:24,317 WARN [RS:0;5ed4808ef0e6:46081 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
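The znode probes and watcher callbacks in the entries above (ZKUtil reporting a missing /1-1330347467/balancer znode, ZKWatcher receiving NodeCreated and NodeChildrenChanged events, the region server setting a watch on its own /rs entry) follow the standard ZooKeeper check-and-watch pattern. The sketch below is illustrative only and uses the plain Apache ZooKeeper client rather than HBase's internal ZKUtil/ZKWatcher helpers; the quorum address and znode path are copied from the log and would differ on a real cluster.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbe {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode taken from the log above; adjust for a real cluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62031", 30_000,
        (WatchedEvent e) -> System.out.println("Event: " + e.getType() + " on " + e.getPath()));
    // exists() returns null when the node is absent (the same "not necessarily an
    // error" case ZKUtil logs for .../balancer) and still registers a watch, so a
    // later NodeCreated event is delivered to the watcher above.
    Stat stat = zk.exists("/1-1330347467/balancer", true);
    if (stat == null) {
      System.out.println("znode does not exist yet (not an error)");
    } else {
      byte[] data = zk.getData("/1-1330347467/balancer", true, stat);
      System.out.println("znode has " + data.length + " bytes");
    }
    zk.close();
  }
}

Because exists() leaves a watch behind even when the node is absent, the later creation of the node is what surfaces as the NodeCreated event seen in the log.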
2024-11-21T00:20:24,317 INFO [RS:0;5ed4808ef0e6:46081 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:24,317 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:24,317 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,46081,1732148421879] 2024-11-21T00:20:24,320 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:20:24,322 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:20:24,322 INFO [RS:0;5ed4808ef0e6:46081 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:20:24,322 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,322 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:20:24,323 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:20:24,323 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
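The MemStoreFlusher entry above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is heap arithmetic: the global memstore limit is a configurable fraction of the heap (0.4 by default, commonly controlled by hbase.regionserver.global.memstore.size) and the low-water mark is 95% of that limit. The sketch below reproduces the arithmetic only; the heap size is an assumption chosen so the numbers line up with the log, and the property names reflect the usual defaults rather than anything printed here.

public class MemStoreSizing {
  public static void main(String[] args) {
    // Heap size is an assumption picked so the results match the log above.
    long maxHeapBytes = 2_200L * 1024 * 1024;  // ~2.2 GB heap
    double globalFraction = 0.4;               // hbase.regionserver.global.memstore.size (usual default)
    double lowerLimitFraction = 0.95;          // hbase.regionserver.global.memstore.size.lower.limit (usual default)

    long globalLimit = (long) (maxHeapBytes * globalFraction);
    long lowWaterMark = (long) (globalLimit * lowerLimitFraction);

    // Prints roughly 880 MB and 836 MB, matching globalMemStoreLimit and its low mark.
    System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n",
        globalLimit >> 20, lowWaterMark >> 20);
  }
}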
2024-11-21T00:20:24,323 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,323 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,323 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,323 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,323 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:24,324 DEBUG [RS:0;5ed4808ef0e6:46081 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:24,328 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,328 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,328 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,328 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
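The executor.ExecutorService entries above boil down to one named, bounded thread pool per event type (RS_OPEN_REGION, RS_CLOSE_META, RS_SNAPSHOT_OPERATIONS, and so on), each with the logged corePoolSize/maxPoolSize. HBase's own executor framework is internal, so the sketch below only illustrates the same pattern with plain java.util.concurrent; the pool names and sizes are borrowed from the log lines and the task body is a placeholder.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedPools {
  // Builds a bounded pool whose threads carry the executor name, mirroring the
  // corePoolSize/maxPoolSize pairs the region server logs for each event type.
  static ThreadPoolExecutor newPool(String name, int core, int max) {
    AtomicInteger seq = new AtomicInteger();
    ThreadPoolExecutor pool = new ThreadPoolExecutor(core, max, 60, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + seq.getAndIncrement()));
    pool.allowCoreThreadTimeOut(true);
    return pool;
  }

  public static void main(String[] args) {
    ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1, 1);
    ThreadPoolExecutor snapshotOps = newPool("RS_SNAPSHOT_OPERATIONS", 3, 3);
    openRegion.execute(() ->
        System.out.println("open region task on " + Thread.currentThread().getName()));
    openRegion.shutdown();
    snapshotOps.shutdown();
  }
}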
2024-11-21T00:20:24,328 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,328 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46081,1732148421879-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:24,342 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:20:24,343 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46081,1732148421879-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,343 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,343 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.Replication(171): 5ed4808ef0e6,46081,1732148421879 started 2024-11-21T00:20:24,356 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:24,356 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,46081,1732148421879, RpcServer on 5ed4808ef0e6/172.17.0.2:46081, sessionid=0x1015ac3425c0004 2024-11-21T00:20:24,357 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:20:24,357 DEBUG [RS:0;5ed4808ef0e6:46081 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:24,357 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,46081,1732148421879' 2024-11-21T00:20:24,357 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1330347467/flush-table-proc/abort' 2024-11-21T00:20:24,357 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1330347467/flush-table-proc/acquired' 2024-11-21T00:20:24,358 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:20:24,358 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:20:24,358 DEBUG [RS:0;5ed4808ef0e6:46081 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:24,358 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,46081,1732148421879' 2024-11-21T00:20:24,358 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1330347467/online-snapshot/abort' 2024-11-21T00:20:24,358 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1330347467/online-snapshot/acquired' 2024-11-21T00:20:24,358 DEBUG [RS:0;5ed4808ef0e6:46081 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:20:24,358 INFO [RS:0;5ed4808ef0e6:46081 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:20:24,358 INFO [RS:0;5ed4808ef0e6:46081 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:20:24,459 INFO [RS:0;5ed4808ef0e6:46081 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:24,460 INFO [RS:0;5ed4808ef0e6:46081 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C46081%2C1732148421879, suffix=, logDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879, archiveDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/oldWALs, maxLogs=10 2024-11-21T00:20:24,473 DEBUG [RS:0;5ed4808ef0e6:46081 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879/5ed4808ef0e6%2C46081%2C1732148421879.1732148424461, exclude list is [], retry=0 2024-11-21T00:20:24,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44331,DS-4f11d27d-6609-4a0b-939d-c6b8f8795e65,DISK] 2024-11-21T00:20:24,477 INFO [RS:0;5ed4808ef0e6:46081 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879/5ed4808ef0e6%2C46081%2C1732148421879.1732148424461 2024-11-21T00:20:24,478 DEBUG [RS:0;5ed4808ef0e6:46081 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37077:37077)] 2024-11-21T00:20:24,617 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:20:24,618 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2 2024-11-21T00:20:24,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:20:24,877 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:24,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:24,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:24,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:24,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:24,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:25,023 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:25,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:25,025 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:25,025 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:25,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:25,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:25,028 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:25,028 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,028 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:25,029 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:25,029 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,030 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:25,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740 2024-11-21T00:20:25,031 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740 2024-11-21T00:20:25,032 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:25,032 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:25,032 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
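The store-opening entries above print the effective column-family settings for hbase:meta (VERSIONS 3, IN_MEMORY true, BLOOMFILTER ROWCOL, DATA_BLOCK_ENCODING ROW_INDEX_V1, BLOCKSIZE 8192 for the 'info' family). The same attributes can be declared on an ordinary table through the public client builder API; the sketch below is illustrative only, and the table name is hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
  public static void main(String[] args) {
    // Same knobs the log prints for hbase:meta's 'info' family, applied to a
    // hypothetical user table.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8192)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();

    // Prints an attribute dump in the same {NAME => 'info', ...} form seen above.
    System.out.println(td);
  }
}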
2024-11-21T00:20:25,033 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:25,034 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:25,035 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63655675, jitterRate=-0.051456525921821594}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:25,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148425023Initializing all the Stores at 1732148425024 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148425024Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148425024Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148425024Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148425024Cleaning up temporary data from old regions at 1732148425032 (+8 ms)Region opened successfully at 1732148425035 (+3 ms) 2024-11-21T00:20:25,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:25,035 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:25,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:25,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:25,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:25,035 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:25,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148425035Disabling compacts and flushes for region at 1732148425035Disabling writes for close at 1732148425035Writing 
region close event to WAL at 1732148425035Closed at 1732148425035 2024-11-21T00:20:25,036 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:25,036 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:20:25,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:20:25,037 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:25,038 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:20:25,188 DEBUG [5ed4808ef0e6:37261 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:20:25,189 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:25,190 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,46081,1732148421879, state=OPENING 2024-11-21T00:20:25,203 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:20:25,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:25,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:25,214 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1330347467/meta-region-server: CHANGED 2024-11-21T00:20:25,215 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1330347467/meta-region-server: CHANGED 2024-11-21T00:20:25,215 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:25,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,46081,1732148421879}] 2024-11-21T00:20:25,367 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:20:25,369 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:59401, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:20:25,372 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:20:25,373 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:25,373 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:20:25,377 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C46081%2C1732148421879.meta, suffix=.meta, logDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879, archiveDir=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/oldWALs, maxLogs=10 2024-11-21T00:20:25,391 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879/5ed4808ef0e6%2C46081%2C1732148421879.meta.1732148425377.meta, exclude list is [], retry=0 2024-11-21T00:20:25,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44331,DS-4f11d27d-6609-4a0b-939d-c6b8f8795e65,DISK] 2024-11-21T00:20:25,396 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879/5ed4808ef0e6%2C46081%2C1732148421879.meta.1732148425377.meta 2024-11-21T00:20:25,397 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37077:37077)] 2024-11-21T00:20:25,397 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:25,397 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
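The entries just above show the master embedding MultiRowMutationEndpoint in the hbase:meta descriptor (coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|') and the region server loading it when the region opens. For a user table the same wiring is declared on the table descriptor; below is a hedged sketch using the public builder API, with a hypothetical table name.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorOnTable {
  public static void main(String[] args) throws Exception {
    // Declares the same endpoint the log shows being loaded for hbase:meta on a
    // hypothetical table; the class name travels in the table descriptor and each
    // region server loads it when it opens the region.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td.getCoprocessorDescriptors());
  }
}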
2024-11-21T00:20:25,398 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:25,398 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:20:25,398 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:20:25,398 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:20:25,398 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:25,398 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:20:25,398 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:20:25,400 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:25,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:25,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:25,402 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:25,402 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,402 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:25,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:25,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:25,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:25,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:25,405 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:25,405 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740 2024-11-21T00:20:25,406 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740 2024-11-21T00:20:25,407 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:25,407 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:25,407 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
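The FlushLargeStoresPolicy entry above derives its 32.0 M per-family lower bound by dividing the region's memstore flush size across its column families, because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta. The arithmetic below assumes the usual 128 MB flush size (hbase.hregion.memstore.flush.size) and the four families shown earlier; the result matches the flushSizeLowerBound=33554432 printed when the region opens.

public class PerFamilyFlushBound {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024;  // hbase.hregion.memstore.flush.size default (assumption)
    int columnFamilies = 4;                       // info, ns, rep_barrier, table
    long perFamilyLowerBound = memstoreFlushSize / columnFamilies;
    // 134217728 / 4 = 33554432 bytes = 32 MB, matching "(32.0 M)" in the log and
    // FlushLargeStoresPolicy{flushSizeLowerBound=33554432}.
    System.out.println(perFamilyLowerBound + " bytes = " + (perFamilyLowerBound >> 20) + " MB");
  }
}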
2024-11-21T00:20:25,408 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:25,409 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59349937, jitterRate=-0.11561702191829681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:25,409 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:20:25,409 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148425398Writing region info on filesystem at 1732148425398Initializing all the Stores at 1732148425399 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148425399Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148425400 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148425400Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148425400Cleaning up temporary data from old regions at 1732148425407 (+7 ms)Running coprocessor post-open hooks at 1732148425409 (+2 ms)Region opened successfully at 1732148425409 2024-11-21T00:20:25,410 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148425367 2024-11-21T00:20:25,412 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:20:25,413 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:20:25,414 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:25,415 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,46081,1732148421879, state=OPEN 2024-11-21T00:20:25,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1330347467/meta-region-server 2024-11-21T00:20:25,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1330347467/meta-region-server 2024-11-21T00:20:25,424 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:25,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1330347467/meta-region-server: CHANGED 2024-11-21T00:20:25,424 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1330347467/meta-region-server: CHANGED 2024-11-21T00:20:25,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:20:25,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,46081,1732148421879 in 209 msec 2024-11-21T00:20:25,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:20:25,428 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 390 msec 2024-11-21T00:20:25,429 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:25,429 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:20:25,430 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:25,430 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,46081,1732148421879, seqNum=-1] 2024-11-21T00:20:25,431 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:25,432 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55583, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:25,437 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2510 sec 2024-11-21T00:20:25,437 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148425437, completionTime=-1 
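Once the assignment procedures above finish, the meta location written to the /meta-region-server znode is what clients resolve, as the "fetched meta region location" entry shows. A minimal client-side sketch of that lookup follows, using the public Connection/RegionLocator API; it assumes an hbase-site.xml with the right quorum is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLocationProbe {
  public static void main(String[] args) throws Exception {
    // Assumes cluster settings (ZooKeeper quorum etc.) are available via hbase-site.xml.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves the same host/port pair the master published during assignment.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println("hbase:meta is served by " + loc.getServerName());
    }
  }
}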
2024-11-21T00:20:25,437 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:20:25,437 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148485440 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148545440 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37261,1732148421757-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37261,1732148421757-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37261,1732148421757-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:37261, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:25,440 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:25,442 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:20:25,444 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.483sec 2024-11-21T00:20:25,444 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:20:25,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:20:25,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:20:25,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
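[editor's note] The master reports initialization complete (about 3.5 seconds) and schedules its periodic chores. For context, a startup sequence like this typically comes from HBase's testing utility; the sketch below only guesses at the shape of such a bootstrap. The class name HBaseTestingUtil is taken from the log, but the method names used here are assumptions, not the test's actual code.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.client.Admin;

    public class MiniClusterBootstrapSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // One master plus one region server, matching "expected min=1 server(s), max=1" above.
        util.startMiniCluster(1);
        try (Admin admin = util.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }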
2024-11-21T00:20:25,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:20:25,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37261,1732148421757-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:25,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37261,1732148421757-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:20:25,447 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:20:25,447 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:20:25,447 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37261,1732148421757-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:25,518 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@239d60da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,518 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37261,-1 for getting cluster id 2024-11-21T00:20:25,518 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:25,519 DEBUG [HMaster-EventLoopGroup-19-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '81c523d2-6ef8-4bc8-b27d-9a8b671a4708' 2024-11-21T00:20:25,519 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:25,519 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "81c523d2-6ef8-4bc8-b27d-9a8b671a4708" 2024-11-21T00:20:25,520 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fe1f74e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,520 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37261,-1] 2024-11-21T00:20:25,520 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:25,520 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:25,521 INFO [HMaster-EventLoopGroup-19-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58128, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:25,522 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@776508e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,522 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:25,523 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,46081,1732148421879, seqNum=-1] 2024-11-21T00:20:25,523 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:25,524 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46158, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:25,526 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:25,527 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:62031 2024-11-21T00:20:25,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:25,540 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015ac3425c0005 connected 2024-11-21T00:20:25,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fa60bbe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,553 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,33815,-1 for getting cluster id 2024-11-21T00:20:25,553 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:25,554 DEBUG [HMaster-EventLoopGroup-17-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '212ecdbe-1c03-48c4-b613-45677eb0d5d7' 2024-11-21T00:20:25,554 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:25,554 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "212ecdbe-1c03-48c4-b613-45677eb0d5d7" 2024-11-21T00:20:25,554 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c5778bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,554 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,33815,-1] 2024-11-21T00:20:25,555 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:25,555 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:25,556 INFO [HMaster-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:39514, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:25,557 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@309f2ff3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,557 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:25,558 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:25,558 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1df57f12 2024-11-21T00:20:25,558 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:25,559 INFO [HMaster-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:25,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:37261,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:20:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:20:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:25,562 DEBUG [PEWorker-3 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:37261' 2024-11-21T00:20:25,566 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e874248, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,566 DEBUG [PEWorker-3 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37261,-1 for getting cluster id 2024-11-21T00:20:25,566 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:25,567 DEBUG [HMaster-EventLoopGroup-19-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '81c523d2-6ef8-4bc8-b27d-9a8b671a4708' 2024-11-21T00:20:25,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:25,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "81c523d2-6ef8-4bc8-b27d-9a8b671a4708" 2024-11-21T00:20:25,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@304530c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37261,-1] 2024-11-21T00:20:25,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:25,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:25,569 INFO [HMaster-EventLoopGroup-19-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58148, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:25,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12f232a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:25,570 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:25,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:25,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@56af950d 2024-11-21T00:20:25,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:25,571 INFO [HMaster-EventLoopGroup-19-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58152, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:25,572 INFO [PEWorker-3 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-3. 
2024-11-21T00:20:25,572 DEBUG [PEWorker-3 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:20:25,572 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:25,573 DEBUG [PEWorker-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:25,573 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
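[editor's note] The call stack above shows AddPeerProcedure validating the new peer's cluster key in ReplicationPeerManager.checkClusterKey, which opens and then closes a probe connection to hbase+rpc://5ed4808ef0e6:37261. The request that drives this is an Admin addReplicationPeer call from the test; a hedged sketch of that call follows, with the peer id and cluster key copied from the log and an assumed admin handle for the source cluster.

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public final class AddPeerSketch {
      // 'admin' is assumed to come from a Connection to the source cluster (master port 33815 above).
      static void addPeerOne(Admin admin) throws java.io.IOException {
        ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
            .setClusterKey("hbase+rpc://5ed4808ef0e6:37261") // target cluster, as logged by HMaster(3973)
            .setReplicateAllUserTables(true)
            .build();
        // Third argument enables the peer immediately, matching state=ENABLED in the log.
        admin.addReplicationPeer("1", peerConfig, true);
      }
    }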
2024-11-21T00:20:25,573 INFO [PEWorker-3 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:25,574 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1139): Stored pid=5, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:20:25,575 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:20:25,575 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:25,576 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:20:25,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741835_1011 (size=1138) 2024-11-21T00:20:25,624 DEBUG [PEWorker-3 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:20:25,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:25,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:25,988 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => be07aaacfffca5950a9623d74f366e3c, NAME => 'hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0 2024-11-21T00:20:25,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741836_1012 (size=44) 2024-11-21T00:20:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:26,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:20:26,308 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T00:20:26,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:26,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver Metrics about HBase RegionObservers 2024-11-21T00:20:26,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:26,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T00:20:26,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:20:26,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T00:20:26,397 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:26,397 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing be07aaacfffca5950a9623d74f366e3c, disabling compactions & 
flushes 2024-11-21T00:20:26,397 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:26,397 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:26,397 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. after waiting 0 ms 2024-11-21T00:20:26,397 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:26,397 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:26,397 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for be07aaacfffca5950a9623d74f366e3c: Waiting for close lock at 1732148426397Disabling compacts and flushes for region at 1732148426397Disabling writes for close at 1732148426397Writing region close event to WAL at 1732148426397Closed at 1732148426397 2024-11-21T00:20:26,398 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:20:26,399 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148426398"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148426398"}]},"ts":"1732148426398"} 2024-11-21T00:20:26,402 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
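[editor's note] CreateTableProcedure (pid=5) has now written the hbase:replication region to the filesystem and registered it in hbase:meta. That system table is created internally by the master as part of adding the first peer, not by client code; purely to illustrate the descriptor shape logged above (three single-version families hfileref, queue and sid), a hypothetical client-side equivalent might look like the following. The table name and the admin handle are made up for the example.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ReplicationLikeTableSketch {
      static void createDemoTable(Admin admin) throws java.io.IOException {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_replication_like"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("hfileref")).setMaxVersions(1).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("queue")).setMaxVersions(1).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("sid")).setMaxVersions(1).build())
            .build();
        admin.createTable(td); // drives a CreateTableProcedure much like pid=5 above
      }
    }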
2024-11-21T00:20:26,403 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:20:26,403 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148426403"}]},"ts":"1732148426403"} 2024-11-21T00:20:26,406 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:20:26,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN}] 2024-11-21T00:20:26,410 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN 2024-11-21T00:20:26,410 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,41953,1732148416380; forceNewPlan=false, retain=false 2024-11-21T00:20:26,561 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=6 updating hbase:meta row=be07aaacfffca5950a9623d74f366e3c, regionState=OPENING, regionLocation=5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:26,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-18-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN because future has completed 2024-11-21T00:20:26,564 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=7, ppid=6, state=RUNNABLE, hasLock=false; OpenRegionProcedure be07aaacfffca5950a9623d74f366e3c, server=5ed4808ef0e6,41953,1732148416380}] 2024-11-21T00:20:26,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:26,721 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
2024-11-21T00:20:26,722 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:26,722 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:20:26,723 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C41953%2C1732148416380.rep, suffix=, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs, maxLogs=10 2024-11-21T00:20:26,737 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.rep.1732148426724, exclude list is [], retry=0 2024-11-21T00:20:26,739 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:26,740 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.rep.1732148426724 2024-11-21T00:20:26,741 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:26,741 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7752): Opening region: {ENCODED => be07aaacfffca5950a9623d74f366e3c, NAME => 'hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:26,742 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:20:26,742 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:26,742 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
service=MultiRowMutationService 2024-11-21T00:20:26,742 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:20:26,742 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,743 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:26,743 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7794): checking encryption for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,743 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(7797): checking classloading for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,744 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,745 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be07aaacfffca5950a9623d74f366e3c columnFamilyName hfileref 2024-11-21T00:20:26,746 DEBUG [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:26,746 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(327): Store=be07aaacfffca5950a9623d74f366e3c/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:26,746 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,747 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be07aaacfffca5950a9623d74f366e3c columnFamilyName queue 2024-11-21T00:20:26,747 DEBUG [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:26,747 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(327): Store=be07aaacfffca5950a9623d74f366e3c/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:26,748 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,749 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be07aaacfffca5950a9623d74f366e3c columnFamilyName sid 2024-11-21T00:20:26,749 DEBUG [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:26,749 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(327): Store=be07aaacfffca5950a9623d74f366e3c/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:26,749 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1038): replaying wal for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,750 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,750 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,751 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1048): stopping wal replay for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,751 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1060): Cleaning up temporary data for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,751 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:20:26,752 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1093): writing seq id for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,756 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:26,757 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1114): Opened be07aaacfffca5950a9623d74f366e3c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61647030, jitterRate=-0.08138766884803772}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:20:26,757 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1122): Running coprocessor post-open hooks for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:26,758 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegion(1006): Region open journal for be07aaacfffca5950a9623d74f366e3c: Running coprocessor pre-open hook at 1732148426743Writing region info on filesystem at 1732148426743Initializing all the Stores at 1732148426744 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148426744Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148426744Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148426744Cleaning up temporary data from old regions at 1732148426751 (+7 ms)Running coprocessor post-open hooks at 1732148426757 (+6 ms)Region opened successfully at 1732148426757 2024-11-21T00:20:26,758 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c., pid=7, masterSystemTime=1732148426716 2024-11-21T00:20:26,760 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:26,760 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=7}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:26,761 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=6 updating hbase:meta row=be07aaacfffca5950a9623d74f366e3c, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:26,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-18-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, ppid=6, state=RUNNABLE, hasLock=false; OpenRegionProcedure be07aaacfffca5950a9623d74f366e3c, server=5ed4808ef0e6,41953,1732148416380 because future has completed 2024-11-21T00:20:26,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=7, resume processing ppid=6 2024-11-21T00:20:26,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, ppid=6, state=SUCCESS, hasLock=false; OpenRegionProcedure be07aaacfffca5950a9623d74f366e3c, server=5ed4808ef0e6,41953,1732148416380 in 200 msec 2024-11-21T00:20:26,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:20:26,769 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN in 358 msec 2024-11-21T00:20:26,770 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:20:26,770 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148426770"}]},"ts":"1732148426770"} 2024-11-21T00:20:26,772 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:20:26,773 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=5, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:20:26,774 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, state=SUCCESS, hasLock=false; CreateTableProcedure 
table=hbase:replication in 1.2000 sec 2024-11-21T00:20:26,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-18-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c., hostname=5ed4808ef0e6,41953,1732148416380, seqNum=2] 2024-11-21T00:20:26,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:26,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:26,825 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=4, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:20:26,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41953 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=8 2024-11-21T00:20:26,978 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:20:27,021 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,41953,1732148416380, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:20:27,022 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:27,022 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,41953,1732148416380, seqNum=-1] 2024-11-21T00:20:27,022 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:27,023 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-18-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50825, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=ClientService 2024-11-21T00:20:27,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-18-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,41953,1732148416380', locateType=CURRENT is [region=hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c., hostname=5ed4808ef0e6,41953,1732148416380, seqNum=2] 2024-11-21T00:20:27,028 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-21T00:20:27,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-21T00:20:27,032 INFO [PEWorker-1 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,41953,1732148416380 
suceeded 2024-11-21T00:20:27,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=4 2024-11-21T00:20:27,034 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 207 msec 2024-11-21T00:20:27,034 INFO [PEWorker-4 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:37261,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:20:27,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.4750 sec 2024-11-21T00:20:27,047 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:37261' 2024-11-21T00:20:27,049 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@148d6a8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:27,049 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37261,-1 for getting cluster id 2024-11-21T00:20:27,049 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:27,050 DEBUG [HMaster-EventLoopGroup-19-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '81c523d2-6ef8-4bc8-b27d-9a8b671a4708' 2024-11-21T00:20:27,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:27,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "81c523d2-6ef8-4bc8-b27d-9a8b671a4708" 2024-11-21T00:20:27,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@358c2349, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:27,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37261,-1] 2024-11-21T00:20:27,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:27,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:27,051 INFO [HMaster-EventLoopGroup-19-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58166, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 
(auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:27,052 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@31f69650, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:27,052 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:27,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:27,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1ef3b12c 2024-11-21T00:20:27,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-20-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:27,054 INFO [HMaster-EventLoopGroup-19-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58176, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=MasterService 2024-11-21T00:20:27,054 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,41953,1732148416380 (queues=1) is replicating from cluster=212ecdbe-1c03-48c4-b613-45677eb0d5d7 to cluster=81c523d2-6ef8-4bc8-b27d-9a8b671a4708 2024-11-21T00:20:27,054 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C41953%2C1732148416380 2024-11-21T00:20:27,055 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,41953,1732148416380, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:20:27,055 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.wal-reader.5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456, startPosition=0, beingWritten=true 2024-11-21T00:20:27,056 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.shipper5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C41953%2C1732148416380 2024-11-21T00:20:27,262 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.wal-reader.5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] 
regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456 to pos 0, reset compression=false 2024-11-21T00:20:27,566 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.wal-reader.5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456 to pos 0, reset compression=false 2024-11-21T00:20:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:27,718 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:27,718 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:20:27,718 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:525) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:27,718 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:27,719 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:27,719 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:27,719 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:27,720 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:27,720 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@58a83a9f 2024-11-21T00:20:27,720 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:27,722 INFO [HMaster-EventLoopGroup-17-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:27,722 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33815 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:20:27,723 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
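[editor's note] The two client operations logged just above — adding peer "1" with a cluster key and the master later serving "get replication peer config, id=1" — correspond to the public Admin replication API. A minimal sketch of that usage follows; it is not the test's actual code, the connection setup is an illustrative assumption, and the cluster key string is simply copied from the log line above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ReplicationPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Add peer "1"; the cluster key value is the one printed by AddPeerProcedure above.
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://5ed4808ef0e6:37261")
          .setReplicateAllUserTables(true)
          .build();
      admin.addReplicationPeer("1", peerConfig);

      // Read the stored peer config back, as the master logs for "get replication peer config, id=1".
      ReplicationPeerConfig stored = admin.getReplicationPeerConfig("1");
      System.out.println(stored.getClusterKey());
    }
  }
}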
2024-11-21T00:20:27,723 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:538) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:27,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:27,723 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:27,723 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:20:27,723 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=100034015, stopped=false 2024-11-21T00:20:27,723 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,33815,1732148416153 2024-11-21T00:20:27,723 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:27,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:27,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:27,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:27,737 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:27,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:27,737 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:27,737 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:538) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:27,737 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:27,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:27,737 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,41953,1732148416380' ***** 2024-11-21T00:20:27,737 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:20:27,737 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:20:27,738 INFO [RS:0;5ed4808ef0e6:41953 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:20:27,738 INFO [RS:0;5ed4808ef0e6:41953 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:20:27,738 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(3091): Received CLOSE for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:27,738 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:20:27,739 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:27,744 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:27,744 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:27,744 INFO [RS:0;5ed4808ef0e6:41953 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:41953. 
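[editor's note] The shutdown sequence above is driven from the test through HBaseTestingUtil.shutdownMiniHBaseCluster, as the call stacks show. A minimal sketch of how such a test typically brackets a mini cluster; the utility class name is taken from the stack traces, while the lifecycle method names and the scaffolding shown here are illustrative assumptions rather than the test's own code.

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();       // boots an in-process master and region server
    try {
      // ... exercise replication peers, tables, etc. against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();  // stops the HBase cluster plus the backing mini DFS/ZK
    }
  }
}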
2024-11-21T00:20:27,744 DEBUG [RS:0;5ed4808ef0e6:41953 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:27,745 DEBUG [RS:0;5ed4808ef0e6:41953 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:27,745 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:20:27,745 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:20:27,745 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:20:27,745 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:20:27,745 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:20:27,746 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, be07aaacfffca5950a9623d74f366e3c=hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.} 2024-11-21T00:20:27,746 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing be07aaacfffca5950a9623d74f366e3c, disabling compactions & flushes 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:27,748 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
2024-11-21T00:20:27,748 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. after waiting 0 ms 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:27,748 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:27,749 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing be07aaacfffca5950a9623d74f366e3c 3/3 column families, dataSize=147 B heapSize=992 B 2024-11-21T00:20:27,749 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-21T00:20:27,764 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/.tmp/queue/9d516b4bd1a643ffae4bdcf5f4608a4c is 151, key is 1-5ed4808ef0e6,41953,1732148416380/queue:5ed4808ef0e6%2C41953%2C1732148416380/1732148427026/Put/seqid=0 2024-11-21T00:20:27,766 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/info/f32d6059b551438b861e8533e7b04552 is 147, key is hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c./info:regioninfo/1732148426761/Put/seqid=0 2024-11-21T00:20:27,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741838_1014 (size=5350) 2024-11-21T00:20:27,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741839_1015 (size=6631) 2024-11-21T00:20:27,821 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:27,946 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:27,971 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.wal-reader.5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456 to pos 0, reset compression=false 2024-11-21T00:20:28,146 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:28,173 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=147 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/.tmp/queue/9d516b4bd1a643ffae4bdcf5f4608a4c 2024-11-21T00:20:28,177 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.17 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/info/f32d6059b551438b861e8533e7b04552 2024-11-21T00:20:28,181 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/.tmp/queue/9d516b4bd1a643ffae4bdcf5f4608a4c as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/queue/9d516b4bd1a643ffae4bdcf5f4608a4c 2024-11-21T00:20:28,186 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/queue/9d516b4bd1a643ffae4bdcf5f4608a4c, entries=1, sequenceid=5, filesize=5.2 K 2024-11-21T00:20:28,187 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~147 B/147, heapSize ~464 B/464, currentSize=0 B/0 for be07aaacfffca5950a9623d74f366e3c in 439ms, sequenceid=5, compaction requested=false 2024-11-21T00:20:28,187 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:20:28,191 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T00:20:28,192 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:28,192 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:28,192 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:28,192 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for be07aaacfffca5950a9623d74f366e3c: Waiting for close lock at 1732148427748Running coprocessor pre-close hooks at 1732148427748Disabling compacts and flushes for region at 1732148427748Disabling writes for close at 1732148427748Obtaining lock to block concurrent updates at 1732148427749 (+1 ms)Preparing flush snapshotting stores in be07aaacfffca5950a9623d74f366e3c at 1732148427749Finished memstore snapshotting hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c., syncing WAL and waiting on mvcc, flushsize=dataSize=147, getHeapSize=944, getOffHeapSize=0, getCellsCount=1 at 1732148427749Flushing stores of hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. at 1732148427749Flushing be07aaacfffca5950a9623d74f366e3c/queue: creating writer at 1732148427750 (+1 ms)Flushing be07aaacfffca5950a9623d74f366e3c/queue: appending metadata at 1732148427763 (+13 ms)Flushing be07aaacfffca5950a9623d74f366e3c/queue: closing flushed file at 1732148427763Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a882c6b: reopening flushed file at 1732148428180 (+417 ms)Finished flush of dataSize ~147 B/147, heapSize ~464 B/464, currentSize=0 B/0 for be07aaacfffca5950a9623d74f366e3c in 439ms, sequenceid=5, compaction requested=false at 1732148428187 (+7 ms)Writing region close event to WAL at 1732148428188 (+1 ms)Running coprocessor post-close hooks at 1732148428192 (+4 ms)Closed at 1732148428192 2024-11-21T00:20:28,192 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
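[editor's note] The close path above flushes the remaining 147 B of the hbase:replication memstore into an HFile before the region is closed. The same flush machinery can also be triggered explicitly from a client; a small sketch assuming a reachable cluster, using only the public Admin.flush call (the choice of table here is illustrative).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Force the memstores of hbase:replication out to HFiles, mirroring the
      // flush the region server performs on close in the log above.
      admin.flush(TableName.valueOf("hbase:replication"));
    }
  }
}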
2024-11-21T00:20:28,200 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/ns/a3ce47a20f0e4079b5056bd104bb2951 is 43, key is default/ns:d/1732148419508/Put/seqid=0 2024-11-21T00:20:28,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741840_1016 (size=5153) 2024-11-21T00:20:28,346 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:28,350 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:20:28,350 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:20:28,476 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.wal-reader.5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456 to pos 0, reset compression=false 2024-11-21T00:20:28,546 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:28,604 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/ns/a3ce47a20f0e4079b5056bd104bb2951 2024-11-21T00:20:28,627 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/table/68a268b5ccce4159af07f0996c528430 is 53, key is hbase:replication/table:state/1732148426770/Put/seqid=0 2024-11-21T00:20:28,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741841_1017 (size=5256) 2024-11-21T00:20:28,747 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:20:28,747 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:20:28,747 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:28,948 DEBUG [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:29,040 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=98 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/table/68a268b5ccce4159af07f0996c528430 2024-11-21T00:20:29,053 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/info/f32d6059b551438b861e8533e7b04552 as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/info/f32d6059b551438b861e8533e7b04552 2024-11-21T00:20:29,061 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/info/f32d6059b551438b861e8533e7b04552, entries=10, sequenceid=11, filesize=6.5 K 2024-11-21T00:20:29,062 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/ns/a3ce47a20f0e4079b5056bd104bb2951 as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/ns/a3ce47a20f0e4079b5056bd104bb2951 2024-11-21T00:20:29,067 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/ns/a3ce47a20f0e4079b5056bd104bb2951, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:20:29,068 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/table/68a268b5ccce4159af07f0996c528430 as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/table/68a268b5ccce4159af07f0996c528430 2024-11-21T00:20:29,077 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/table/68a268b5ccce4159af07f0996c528430, entries=2, sequenceid=11, filesize=5.1 K 2024-11-21T00:20:29,078 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1368, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 1330ms, sequenceid=11, compaction requested=false 2024-11-21T00:20:29,081 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.wal-reader.5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.1732148418456 to pos 0, reset compression=false 2024-11-21T00:20:29,098 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T00:20:29,099 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 
2024-11-21T00:20:29,099 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:29,099 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:29,100 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148427748Running coprocessor pre-close hooks at 1732148427748Disabling compacts and flushes for region at 1732148427748Disabling writes for close at 1732148427748Obtaining lock to block concurrent updates at 1732148427749 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732148427749Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1368, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1732148427749Flushing stores of hbase:meta,,1.1588230740 at 1732148427752 (+3 ms)Flushing 1588230740/info: creating writer at 1732148427752Flushing 1588230740/info: appending metadata at 1732148427766 (+14 ms)Flushing 1588230740/info: closing flushed file at 1732148427766Flushing 1588230740/ns: creating writer at 1732148428182 (+416 ms)Flushing 1588230740/ns: appending metadata at 1732148428199 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732148428199Flushing 1588230740/table: creating writer at 1732148428610 (+411 ms)Flushing 1588230740/table: appending metadata at 1732148428627 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732148428627Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27126afe: reopening flushed file at 1732148429052 (+425 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@335c84d0: reopening flushed file at 1732148429061 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36bb7b18: reopening flushed file at 1732148429067 (+6 ms)Finished flush of dataSize ~1.34 KB/1368, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 1330ms, sequenceid=11, compaction requested=false at 1732148429078 (+11 ms)Writing region close event to WAL at 1732148429084 (+6 ms)Running coprocessor post-close hooks at 1732148429099 (+15 ms)Closed at 1732148429099 2024-11-21T00:20:29,100 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:29,148 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,41953,1732148416380; all regions closed. 
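[editor's note] The meta flush above writes the info, ns and table families of hbase:meta,,1.1588230740 out to HFiles before the region closes. For reference, the info family (which holds cells like the "info:regioninfo" entry flushed above) can be inspected with an ordinary client-side scan; a sketch under the assumption of a running, reachable cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Scan only the info family of hbase:meta and print the region row keys.
      Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toStringBinary(r.getRow()));
        }
      }
    }
  }
}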
2024-11-21T00:20:29,152 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380/5ed4808ef0e6%2C41953%2C1732148416380.meta.1732148419353.meta not finished, retry = 0 2024-11-21T00:20:29,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741834_1010 (size=2742) 2024-11-21T00:20:29,256 DEBUG [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs 2024-11-21T00:20:29,256 INFO [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C41953%2C1732148416380.meta:.meta(num 1732148419353) 2024-11-21T00:20:29,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741837_1013 (size=1586) 2024-11-21T00:20:29,260 DEBUG [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs 2024-11-21T00:20:29,260 INFO [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C41953%2C1732148416380.rep:(num 1732148426724) 2024-11-21T00:20:29,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741832_1008 (size=93) 2024-11-21T00:20:29,272 DEBUG [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs 2024-11-21T00:20:29,272 INFO [RS:0;5ed4808ef0e6:41953 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C41953%2C1732148416380:(num 1732148418456) 2024-11-21T00:20:29,272 DEBUG [RS:0;5ed4808ef0e6:41953 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:29,272 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:29,272 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:29,272 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:29,273 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:29,273 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:20:29,273 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,41953,1732148416380 because: Region server is closing 2024-11-21T00:20:29,273 INFO [RS:0;5ed4808ef0e6:41953 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:41953. 
2024-11-21T00:20:29,273 DEBUG [RS:0;5ed4808ef0e6:41953 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:29,273 DEBUG [RS:0;5ed4808ef0e6:41953 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:29,273 DEBUG [RS:0;5ed4808ef0e6:41953 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:29,273 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:29,374 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.wal-reader.5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:20:29,374 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.shipper5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 2024-11-21T00:20:29,374 INFO [RS:0;5ed4808ef0e6:41953 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,41953,1732148416380.replicationSource.shipper5ed4808ef0e6%2C41953%2C1732148416380,1-5ed4808ef0e6,41953,1732148416380 terminated 2024-11-21T00:20:29,374 INFO [RS:0;5ed4808ef0e6:41953 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41953 2024-11-21T00:20:29,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/rs 2024-11-21T00:20:29,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/rs/5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:29,434 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:29,445 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,41953,1732148416380] 2024-11-21T00:20:29,455 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/draining/5ed4808ef0e6,41953,1732148416380 already deleted, retry=false 2024-11-21T00:20:29,455 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,41953,1732148416380 expired; onlineServers=0 2024-11-21T00:20:29,455 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,33815,1732148416153' ***** 2024-11-21T00:20:29,455 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:20:29,455 INFO [M:0;5ed4808ef0e6:33815 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:29,455 INFO [M:0;5ed4808ef0e6:33815 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:29,456 DEBUG [M:0;5ed4808ef0e6:33815 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:20:29,456 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
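[editor's note] The two WARNs above are the replication WAL reader and shipper being woken by interrupt during shutdown: the reader is interrupted out of a sleep, the shipper out of a blocking-queue poll, and both threads then exit so the source can be reported terminated. The pattern itself is plain Java; a generic sketch of it (not HBase code) is shown below.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class InterruptibleWorkerSketch {
  public static void main(String[] args) throws Exception {
    LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();
    Thread worker = new Thread(() -> {
      try {
        while (!Thread.currentThread().isInterrupted()) {
          // Block waiting for the next batch; an interrupt during poll() throws
          // InterruptedException, which is the signal to shut the worker down.
          String batch = queue.poll(20, TimeUnit.SECONDS);
          if (batch != null) {
            System.out.println("shipped " + batch);
          }
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the flag and fall through to exit
      }
    });
    worker.start();
    queue.put("edit-batch-1");
    Thread.sleep(100);      // give the worker time to drain the queue
    worker.interrupt();     // shutdown request, analogous to terminating the source
    worker.join();
  }
}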
2024-11-21T00:20:29,456 DEBUG [M:0;5ed4808ef0e6:33815 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:20:29,456 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148418176 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148418176,5,FailOnTimeoutGroup] 2024-11-21T00:20:29,456 INFO [M:0;5ed4808ef0e6:33815 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:29,456 INFO [M:0;5ed4808ef0e6:33815 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:29,456 DEBUG [M:0;5ed4808ef0e6:33815 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:20:29,456 INFO [M:0;5ed4808ef0e6:33815 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:20:29,456 INFO [M:0;5ed4808ef0e6:33815 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:29,456 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148418180 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148418180,5,FailOnTimeoutGroup] 2024-11-21T00:20:29,456 INFO [M:0;5ed4808ef0e6:33815 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:20:29,456 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:20:29,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/master 2024-11-21T00:20:29,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:29,466 DEBUG [M:0;5ed4808ef0e6:33815 {}] zookeeper.ZKUtil(347): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/master because node does not exist (not an error) 2024-11-21T00:20:29,466 WARN [M:0;5ed4808ef0e6:33815 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:20:29,466 INFO [M:0;5ed4808ef0e6:33815 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/.lastflushedseqids 2024-11-21T00:20:29,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741842_1018 (size=181) 2024-11-21T00:20:29,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:29,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41953-0x1015ac3425c0001, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:29,545 INFO [RS:0;5ed4808ef0e6:41953 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:29,545 INFO 
[RS:0;5ed4808ef0e6:41953 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,41953,1732148416380; zookeeper connection closed. 2024-11-21T00:20:29,545 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@52d2fd4c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@52d2fd4c 2024-11-21T00:20:29,545 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:20:29,873 INFO [M:0;5ed4808ef0e6:33815 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:20:29,873 INFO [M:0;5ed4808ef0e6:33815 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:20:29,874 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:29,874 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:29,874 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:29,874 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:29,874 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:29,874 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=35.09 KB heapSize=42.23 KB 2024-11-21T00:20:29,897 DEBUG [M:0;5ed4808ef0e6:33815 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3bf6882543e040c087d9f5ffb07ef95f is 82, key is hbase:meta,,1/info:regioninfo/1732148419390/Put/seqid=0 2024-11-21T00:20:29,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741843_1019 (size=5672) 2024-11-21T00:20:30,305 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3bf6882543e040c087d9f5ffb07ef95f 2024-11-21T00:20:30,320 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:20:30,330 DEBUG [M:0;5ed4808ef0e6:33815 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6ee8d99d8214416fa456aa971ed8531a is 1478, key is \x00\x00\x00\x00\x00\x00\x00\x05/proc:d/1732148426774/Put/seqid=0 2024-11-21T00:20:30,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741844_1020 (size=7203) 2024-11-21T00:20:30,344 INFO 
[M:0;5ed4808ef0e6:33815 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=34.54 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6ee8d99d8214416fa456aa971ed8531a 2024-11-21T00:20:30,368 DEBUG [M:0;5ed4808ef0e6:33815 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/456310e109de4d68a33fbfd1c1eb3bbe is 69, key is 5ed4808ef0e6,41953,1732148416380/rs:state/1732148418273/Put/seqid=0 2024-11-21T00:20:30,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741845_1021 (size=5156) 2024-11-21T00:20:30,390 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/456310e109de4d68a33fbfd1c1eb3bbe 2024-11-21T00:20:30,399 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3bf6882543e040c087d9f5ffb07ef95f as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3bf6882543e040c087d9f5ffb07ef95f 2024-11-21T00:20:30,405 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3bf6882543e040c087d9f5ffb07ef95f, entries=8, sequenceid=70, filesize=5.5 K 2024-11-21T00:20:30,406 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6ee8d99d8214416fa456aa971ed8531a as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6ee8d99d8214416fa456aa971ed8531a 2024-11-21T00:20:30,411 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6ee8d99d8214416fa456aa971ed8531a, entries=8, sequenceid=70, filesize=7.0 K 2024-11-21T00:20:30,412 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/456310e109de4d68a33fbfd1c1eb3bbe as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/456310e109de4d68a33fbfd1c1eb3bbe 2024-11-21T00:20:30,422 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/456310e109de4d68a33fbfd1c1eb3bbe, entries=1, sequenceid=70, filesize=5.0 K 2024-11-21T00:20:30,429 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(3140): Finished flush of dataSize ~35.09 KB/35933, heapSize ~41.94 KB/42944, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=70, compaction requested=false 2024-11-21T00:20:30,436 INFO [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:30,436 DEBUG [M:0;5ed4808ef0e6:33815 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148429874Disabling compacts and flushes for region at 1732148429874Disabling writes for close at 1732148429874Obtaining lock to block concurrent updates at 1732148429874Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148429874Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=35933, getHeapSize=43184, getOffHeapSize=0, getCellsCount=83 at 1732148429874Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148429875 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148429875Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148429897 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148429897Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148430310 (+413 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148430329 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148430330 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148430349 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148430368 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148430368Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d2abf04: reopening flushed file at 1732148430398 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@789e822c: reopening flushed file at 1732148430405 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6554009c: reopening flushed file at 1732148430411 (+6 ms)Finished flush of dataSize ~35.09 KB/35933, heapSize ~41.94 KB/42944, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 554ms, sequenceid=70, compaction requested=false at 1732148430429 (+18 ms)Writing region close event to WAL at 1732148430436 (+7 ms)Closed at 1732148430436 2024-11-21T00:20:30,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741830_1006 (size=41008) 2024-11-21T00:20:30,447 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 not finished, retry = 0 2024-11-21T00:20:30,548 INFO [M:0;5ed4808ef0e6:33815 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:20:30,548 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
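The flush just logged writes each column family's snapshot to a file under the region's .tmp directory and only then commits it into the live store directory (info/, proc/, rs/) before recording the new sequence id. The sketch below illustrates only that write-temp-then-rename pattern with plain java.nio; the paths and the commitFlush helper are invented for the example and are not HBase code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Minimal sketch of the "write to .tmp, then commit by rename" flush pattern
// reflected in the log above. Paths and method names are illustrative only.
public class TmpThenCommitFlush {

    // Write the snapshot to a temporary file first, so a crash mid-write never
    // leaves a partial file visible in the live store directory.
    static Path writeToTmp(Path storeDir, String fileName, byte[] snapshot) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, snapshot);
        return tmpFile;
    }

    // Commit the flushed file by moving it into the store directory; an atomic
    // move makes the new file appear in a single step.
    static Path commitFlush(Path tmpFile, Path storeDir) throws IOException {
        Path target = storeDir.resolve(tmpFile.getFileName());
        return Files.move(tmpFile, target, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store-info");
        byte[] snapshot = "key=hbase:meta,value=regioninfo".getBytes(StandardCharsets.UTF_8);
        Path tmp = writeToTmp(storeDir, "flushed-file", snapshot);
        Path committed = commitFlush(tmp, storeDir);
        System.out.println("Committed flushed file to " + committed);
    }
}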
2024-11-21T00:20:30,548 INFO [M:0;5ed4808ef0e6:33815 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33815 2024-11-21T00:20:30,548 INFO [M:0;5ed4808ef0e6:33815 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:30,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:30,660 INFO [M:0;5ed4808ef0e6:33815 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:30,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33815-0x1015ac3425c0000, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:30,663 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:30,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:30,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:30,663 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:30,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:30,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:30,663 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:20:30,663 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:30,664 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44785 2024-11-21T00:20:30,665 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44785 connecting to ZooKeeper ensemble=127.0.0.1:62031 2024-11-21T00:20:30,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:447850x0, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:30,677 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44785-0x1015ac3425c0006 connected 2024-11-21T00:20:30,693 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:30,694 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
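The RpcExecutor lines above describe fixed pools of handler threads draining bounded LinkedBlockingQueue call queues (for example handlerCount=3, maxQueueLength=30, one queue per executor). The sketch below shows that general shape with plain java.util.concurrent; the Call record and handler loop are hypothetical stand-ins, not the NettyRpcServer implementation.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch of a fixed handler pool draining a bounded call queue, the general
// shape of the RpcExecutor configuration reported in the log (queueClass =
// LinkedBlockingQueue, maxQueueLength, handlerCount). Illustrative only.
public class HandlerPoolSketch {

    record Call(int id, String payload) {}

    public static void main(String[] args) throws InterruptedException {
        int maxQueueLength = 30;
        int handlerCount = 3;
        BlockingQueue<Call> callQueue = new LinkedBlockingQueue<>(maxQueueLength);

        // Start the handlers; each one blocks on take() until a call arrives.
        for (int i = 0; i < handlerCount; i++) {
            int handlerId = i;
            Thread handler = new Thread(() -> {
                try {
                    while (true) {
                        Call call = callQueue.take();
                        System.out.println("handler-" + handlerId + " served call " + call.id());
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }, "default.FPBQ.Fifo.handler=" + i);
            handler.setDaemon(true);
            handler.start();
        }

        // offer() rejects work instead of blocking once the queue is full,
        // which is how a bounded call queue applies back pressure.
        for (int id = 0; id < 10; id++) {
            boolean accepted = callQueue.offer(new Call(id, "request-" + id));
            if (!accepted) {
                System.out.println("call " + id + " rejected: queue full");
            }
        }
        Thread.sleep(500); // give the daemon handlers time to drain the queue
    }
}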
2024-11-21T00:20:30,696 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:30,696 DEBUG [pool-1006-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: INIT 2024-11-21T00:20:30,696 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0, hbase.cluster.distributed=false 2024-11-21T00:20:30,698 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/acl 2024-11-21T00:20:30,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44785 2024-11-21T00:20:30,698 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44785 2024-11-21T00:20:30,703 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44785 2024-11-21T00:20:30,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44785 2024-11-21T00:20:30,720 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44785 2024-11-21T00:20:30,736 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:30,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:30,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:30,736 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:30,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:30,736 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:30,736 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:20:30,736 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:30,740 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35081 2024-11-21T00:20:30,741 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35081 connecting to ZooKeeper ensemble=127.0.0.1:62031 
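For orientation while reading the ZooKeeper setup above (watchers on /0-586781601/running and /0-586781601/acl, master bound to 172.17.0.2:44785): a client reaches a cluster like this through the same ensemble using the standard HBase client API. The sketch below is not part of the test; the quorum, client port, and base znode are copied from the log and would have to match whatever the cluster actually exposes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: point a stock HBase client at the ZooKeeper ensemble the test
// cluster registered under (quorum 127.0.0.1:62031, baseZNode /0-586781601 in
// this log) and list the tables the active master knows about.
public class ClientConnectSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "62031");
        // The test cluster uses a non-default base znode, so the client must
        // be told where to find the master and meta locations.
        conf.set("zookeeper.znode.parent", "/0-586781601");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            for (TableName table : admin.listTableNames()) {
                System.out.println("table: " + table.getNameAsString());
            }
        }
    }
}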
2024-11-21T00:20:30,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:30,743 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:30,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350810x0, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:30,761 DEBUG [pool-1011-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: INIT 2024-11-21T00:20:30,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:350810x0, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:30,761 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:20:30,764 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35081-0x1015ac3425c0007 connected 2024-11-21T00:20:30,764 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:20:30,765 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/master 2024-11-21T00:20:30,766 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/acl 2024-11-21T00:20:30,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35081 2024-11-21T00:20:30,772 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35081 2024-11-21T00:20:30,776 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35081 2024-11-21T00:20:30,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35081 2024-11-21T00:20:30,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35081 2024-11-21T00:20:30,797 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:44785 2024-11-21T00:20:30,798 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0-586781601/backup-masters/5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:30,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:30,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:30,810 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on existing znode=/0-586781601/backup-masters/5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:30,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:30,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-586781601/master 2024-11-21T00:20:30,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:30,821 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on existing znode=/0-586781601/master 2024-11-21T00:20:30,821 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0-586781601/backup-masters/5ed4808ef0e6,44785,1732148430662 from backup master directory 2024-11-21T00:20:30,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/backup-masters/5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:30,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:30,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/backup-masters 2024-11-21T00:20:30,834 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:20:30,834 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:30,848 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:30,848 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:20:30,867 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=2) cost 19ms. 
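The sequence just logged (create a znode under backup-masters, watch /0-586781601/master, then delete the backup entry and register as active master) is the classic ZooKeeper ephemeral-node election. A minimal sketch of that pattern with the stock org.apache.zookeeper client follows; the paths echo the log, but the logic is a simplification and is not ActiveMasterManager.

import java.nio.charset.StandardCharsets;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Simplified ephemeral-znode election, the pattern behind the backup-master /
// active-master handoff in the log. Paths mirror the log; logic is a sketch.
public class MasterElectionSketch {
    public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:62031";
        String masterZnode = "/0-586781601/master";
        byte[] myId = "5ed4808ef0e6,44785".getBytes(StandardCharsets.UTF_8);

        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        try {
            // The ephemeral node disappears automatically if this session dies,
            // which is what lets a backup take over after a crash.
            zk.create(masterZnode, myId, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            System.out.println("registered as active master");
        } catch (KeeperException.NodeExistsException e) {
            // Someone else is active: watch the node and wait until it goes away.
            CountDownLatch gone = new CountDownLatch(1);
            if (zk.exists(masterZnode, event -> {
                    if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                        gone.countDown();
                    }
                }) == null) {
                gone.countDown(); // it vanished between create() and exists()
            }
            System.out.println("standing by as backup master");
            gone.await();
        } finally {
            zk.close();
        }
    }
}

Because the node is ephemeral it disappears when the owning session ends, so a waiting backup is notified through its watch rather than by any explicit handoff message.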
2024-11-21T00:20:30,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741846_1022 (size=196) 2024-11-21T00:20:30,900 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:30,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:30,928 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:30,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:30,929 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:30,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:31,286 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:31,287 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:20:31,287 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:31,293 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(342): old store file tracker DEFAULT is the same with new store file tracker, skip migration 2024-11-21T00:20:31,296 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(316): Renamed 
hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153 to hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153-dead as it is dead 2024-11-21T00:20:31,296 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153-dead/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 2024-11-21T00:20:31,297 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153-dead/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 after 0ms 2024-11-21T00:20:31,298 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(328): Renamed hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153-dead/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 to hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 2024-11-21T00:20:31,298 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(330): Delete empty local region wal dir hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,33815,1732148416153-dead 2024-11-21T00:20:31,299 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:31,300 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:31,301 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C44785%2C1732148430662, suffix=, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,44785,1732148430662, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/oldWALs, maxLogs=10 2024-11-21T00:20:31,319 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,44785,1732148430662/5ed4808ef0e6%2C44785%2C1732148430662.1732148431301, exclude list is [], retry=0 2024-11-21T00:20:31,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:31,325 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,44785,1732148430662/5ed4808ef0e6%2C44785%2C1732148430662.1732148431301 2024-11-21T00:20:31,325 
DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:31,325 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:31,326 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:31,326 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,326 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,327 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,329 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:20:31,329 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:31,336 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3bf6882543e040c087d9f5ffb07ef95f 2024-11-21T00:20:31,336 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:31,336 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 
MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:20:31,337 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:31,345 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6ee8d99d8214416fa456aa971ed8531a 2024-11-21T00:20:31,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:31,345 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,346 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:20:31,346 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:31,354 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/456310e109de4d68a33fbfd1c1eb3bbe 2024-11-21T00:20:31,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:31,354 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:20:31,355 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:31,355 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:31,356 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,356 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5516): Found 1 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals 2024-11-21T00:20:31,356 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 2024-11-21T00:20:31,361 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5793): Applied 0, skipped 85, firstSequenceIdInLog=3, maxSequenceIdInLog=72, path=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 2024-11-21T00:20:31,362 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.wals/5ed4808ef0e6%2C33815%2C1732148416153.1732148417752 2024-11-21T00:20:31,364 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,364 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,365 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:20:31,366 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:31,369 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/72.seqid, newMaxSeqId=72, maxSeqId=1 2024-11-21T00:20:31,369 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=73; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72503707, jitterRate=0.08038942515850067}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:31,370 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148431326Initializing all the Stores at 1732148431327 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148431327Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148431327Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148431327Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148431327Cleaning up temporary data from old regions at 1732148431364 (+37 ms)Region opened successfully at 1732148431370 (+6 ms) 2024-11-21T00:20:31,370 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:20:31,372 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cab03c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:31,374 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(884): The info family in master local region already has data in it, skip migrating... 2024-11-21T00:20:31,374 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:20:31,374 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:20:31,374 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:20:31,375 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:20:31,377 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:31,377 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=4, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:20:31,378 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(545): Completed pid=5, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:20:31,378 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 2 msec 2024-11-21T00:20:31,378 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:20:31,380 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=1588230740, regionState=OPEN, lastHost=5ed4808ef0e6,41953,1732148416380, regionLocation=5ed4808ef0e6,41953,1732148416380, openSeqNum=2 2024-11-21T00:20:31,381 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(349): Loaded hbase:meta state=OPEN, location=5ed4808ef0e6,41953,1732148416380, table=hbase:meta, region=1588230740 2024-11-21T00:20:31,381 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,41953,1732148416380, state=OPEN 2024-11-21T00:20:31,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:31,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:31,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:31,505 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:31,508 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 
have existingServerCrashProcedures, 1 possibly 'live' servers, and 0 'splitting'. 2024-11-21T00:20:31,555 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/draining/5ed4808ef0e6,41953,1732148416380 already deleted, retry=false 2024-11-21T00:20:31,555 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(695): Processing expiration of 5ed4808ef0e6,41953,1732148416380 on 5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:31,557 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 5ed4808ef0e6,41953,1732148416380, splitWal=true, meta=true 2024-11-21T00:20:31,557 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1991): Scheduled ServerCrashProcedure pid=9 for 5ed4808ef0e6,41953,1732148416380 (carryingMeta=true) 5ed4808ef0e6,41953,1732148416380/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@4187b50b[Write locks = 1, Read locks = 0], oldState=ONLINE. 2024-11-21T00:20:31,560 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/balancer because node does not exist (not necessarily an error) 2024-11-21T00:20:31,629 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/balancer already deleted, retry=false 2024-11-21T00:20:31,629 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:20:31,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:20:31,703 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/normalizer already deleted, retry=false 2024-11-21T00:20:31,704 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:20:31,705 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:20:31,787 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/switch/split already deleted, retry=false 2024-11-21T00:20:31,789 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:20:31,802 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/switch/merge already deleted, retry=false 2024-11-21T00:20:31,815 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Unable to get data of znode /0-586781601/snapshot-cleanup because node does not 
exist (not necessarily an error) 2024-11-21T00:20:31,823 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/snapshot-cleanup already deleted, retry=false 2024-11-21T00:20:31,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:31,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:31,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:31,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:31,834 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,44785,1732148430662, sessionid=0x1015ac3425c0006, setting cluster-up flag (Was=false) 2024-11-21T00:20:31,845 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-586781601/flush-table-proc/acquired, /0-586781601/flush-table-proc/reached, /0-586781601/flush-table-proc/abort 2024-11-21T00:20:31,846 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:31,855 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-586781601/online-snapshot/acquired, /0-586781601/online-snapshot/reached, /0-586781601/online-snapshot/abort 2024-11-21T00:20:31,856 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:31,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1190): begin to load .lastflushedseqids at hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/.lastflushedseqids 2024-11-21T00:20:31,861 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:20:31,861 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
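The StochasticLoadBalancer line above lists the cost functions whose weighted sum scores a candidate region layout (the "sum of multiplier of cost functions"). The sketch below shows only that scoring idea in plain Java; the two cost functions and their multipliers are invented examples, not the balancer's real implementations.

import java.util.List;

// Sketch of the weighted-cost scoring idea behind the StochasticLoadBalancer
// line in the log: each cost function returns a value in [0,1] for a proposed
// layout and the balancer prefers layouts with a lower weighted sum. The
// functions and multipliers below are invented for illustration only.
public class WeightedCostSketch {

    record WeightedCost(String name, double multiplier, double cost) {
        double weighted() { return multiplier * cost; }
    }

    // Cost in [0,1]: 0 when every server holds the same number of regions.
    static double regionCountSkewCost(int[] regionsPerServer) {
        int min = Integer.MAX_VALUE, max = 0;
        for (int c : regionsPerServer) { min = Math.min(min, c); max = Math.max(max, c); }
        return max == 0 ? 0.0 : (max - min) / (double) max;
    }

    // Cost in [0,1]: penalize plans that move many regions.
    static double moveCost(int regionsMoved, int totalRegions) {
        return totalRegions == 0 ? 0.0 : regionsMoved / (double) totalRegions;
    }

    public static void main(String[] args) {
        int[] candidateLayout = {3, 1, 2};   // regions per server after the plan
        List<WeightedCost> costs = List.of(
                new WeightedCost("RegionCountSkewCostFunction", 500.0,
                        regionCountSkewCost(candidateLayout)),
                new WeightedCost("MoveCostFunction", 7.0, moveCost(1, 6)));

        double total = 0.0;
        for (WeightedCost c : costs) {
            System.out.printf("%s: multiplier=%.1f cost=%.3f weighted=%.3f%n",
                    c.name(), c.multiplier(), c.cost(), c.weighted());
            total += c.weighted();
        }
        System.out.printf("total weighted cost = %.3f%n", total);
    }
}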
2024-11-21T00:20:31,861 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,44785,1732148430662 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 1 5ed4808ef0e6,41953,1732148416380 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:31,862 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:31,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148461863 2024-11-21T00:20:31,864 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(169): Start pid=9, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 5ed4808ef0e6,41953,1732148416380, splitWal=true, meta=true 2024-11-21T00:20:31,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:20:31,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:20:31,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:20:31,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:20:31,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:20:31,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:20:31,864 INFO [PEWorker-1 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=9, state=RUNNABLE:SERVER_CRASH_SPLIT_META_LOGS, hasLock=true; ServerCrashProcedure 5ed4808ef0e6,41953,1732148416380, splitWal=true, meta=true, isMeta: true 2024-11-21T00:20:31,866 DEBUG [PEWorker-1 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380-splitting 2024-11-21T00:20:31,867 INFO [PEWorker-1 {}] master.SplitLogManager(171): hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380-splitting dir is empty, no logs to split. 2024-11-21T00:20:31,867 INFO [PEWorker-1 {}] master.SplitWALManager(105): 5ed4808ef0e6,41953,1732148416380 WAL count=0, meta=true 2024-11-21T00:20:31,868 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:31,868 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:20:31,869 INFO [PEWorker-1 {}] master.SplitLogManager(171): hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380-splitting dir is empty, no logs to split. 2024-11-21T00:20:31,869 INFO [PEWorker-1 {}] master.SplitWALManager(105): 5ed4808ef0e6,41953,1732148416380 WAL count=0, meta=true 2024-11-21T00:20:31,869 DEBUG [PEWorker-1 {}] procedure.ServerCrashProcedure(329): Check if 5ed4808ef0e6,41953,1732148416380 WAL splitting is done? wals=0, meta=true 2024-11-21T00:20:31,869 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148431869,5,FailOnTimeoutGroup] 2024-11-21T00:20:31,869 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148431869,5,FailOnTimeoutGroup] 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
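The ChoreService entries above register periodic maintenance chores such as LogsCleaner and HFileCleaner (period=600000 ms) and ReplicationBarrierCleaner (period=43200000 ms). The snippet below shows the same fixed-period scheduling pattern with a plain ScheduledExecutorService; the cleaner body is a placeholder, not the TimeToLive*Cleaner logic.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch of the periodic "chore" scheduling pattern in the log (e.g.
// LogsCleaner period=600000ms, HFileCleaner period=600000ms) using a plain
// ScheduledExecutorService. The cleaner body is a placeholder.
public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);

        Runnable logsCleaner = () -> {
            // A real cleaner would list old WAL files and delete those past
            // their TTL; here we only log that the chore fired.
            System.out.println("LogsCleaner chore fired at " + System.currentTimeMillis());
        };

        // Fixed-delay scheduling: the next run is measured from the end of the
        // previous one, so a slow cleanup pass never stacks up extra runs.
        chorePool.scheduleWithFixedDelay(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(1);   // let the first run fire, then shut down
        chorePool.shutdownNow();
    }
}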
2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:31,869 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:31,870 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148431869, completionTime=-1 2024-11-21T00:20:31,870 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(848): The value of 'hbase.master.wait.on.regionservers.maxtostart' (-1) is set less than 'hbase.master.wait.on.regionservers.mintostart' (1), ignoring. 2024-11-21T00:20:31,870 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=0; waited=0ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=0ms 2024-11-21T00:20:31,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:20:31,871 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:31,872 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-21T00:20:31,883 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(746): ClusterId : 212ecdbe-1c03-48c4-b613-45677eb0d5d7 2024-11-21T00:20:31,883 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:20:31,895 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:20:31,895 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:20:31,908 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:20:31,909 DEBUG [RS:0;5ed4808ef0e6:35081 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@624a76d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:31,923 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:35081 2024-11-21T00:20:31,923 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor 
loading is enabled 2024-11-21T00:20:31,923 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:20:31,923 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:20:31,923 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,44785,1732148430662 with port=35081, startcode=1732148430736 2024-11-21T00:20:31,924 DEBUG [RS:0;5ed4808ef0e6:35081 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:20:31,925 INFO [HMaster-EventLoopGroup-21-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46895, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.9 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:20:31,925 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44785 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:31,926 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44785 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:31,927 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0 2024-11-21T00:20:31,927 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34383 2024-11-21T00:20:31,927 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:20:31,970 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=100ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=0ms 2024-11-21T00:20:32,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/rs 2024-11-21T00:20:32,010 DEBUG [RS:0;5ed4808ef0e6:35081 {}] zookeeper.ZKUtil(111): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on existing znode=/0-586781601/rs/5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:32,010 WARN [RS:0;5ed4808ef0e6:35081 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:20:32,010 INFO [RS:0;5ed4808ef0e6:35081 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:32,010 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:32,010 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,35081,1732148430736] 2024-11-21T00:20:32,022 WARN [5ed4808ef0e6:44785 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-21T00:20:32,030 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:20:32,032 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:20:32,032 INFO [RS:0;5ed4808ef0e6:35081 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:20:32,032 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,032 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:20:32,033 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:20:32,033 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,033 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,033 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,033 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,033 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:32,034 DEBUG [RS:0;5ed4808ef0e6:35081 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:32,034 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,034 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,034 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,034 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,035 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,035 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35081,1732148430736-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:32,053 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:20:32,053 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35081,1732148430736-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,072 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,35081,1732148430736, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:20:32,072 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:32,073 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.Replication(171): 5ed4808ef0e6,35081,1732148430736 started 2024-11-21T00:20:32,091 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:32,091 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,35081,1732148430736, RpcServer on 5ed4808ef0e6/172.17.0.2:35081, sessionid=0x1015ac3425c0007 2024-11-21T00:20:32,091 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource,1-5ed4808ef0e6,35081,1732148430736 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:37261' 2024-11-21T00:20:32,091 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:20:32,091 DEBUG [RS:0;5ed4808ef0e6:35081 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:32,091 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,35081,1732148430736' 2024-11-21T00:20:32,091 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-586781601/flush-table-proc/abort' 2024-11-21T00:20:32,092 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-586781601/flush-table-proc/acquired' 2024-11-21T00:20:32,092 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:20:32,092 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:20:32,092 DEBUG [RS:0;5ed4808ef0e6:35081 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:32,092 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,35081,1732148430736' 2024-11-21T00:20:32,092 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-586781601/online-snapshot/abort' 2024-11-21T00:20:32,093 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-586781601/online-snapshot/acquired' 2024-11-21T00:20:32,093 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource,1-5ed4808ef0e6,35081,1732148430736 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@6a36dd48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:32,093 DEBUG [RS:0;5ed4808ef0e6:35081 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:20:32,093 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource,1-5ed4808ef0e6,35081,1732148430736 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37261,-1 for getting cluster id 2024-11-21T00:20:32,093 INFO [RS:0;5ed4808ef0e6:35081 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:20:32,093 INFO [RS:0;5ed4808ef0e6:35081 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-21T00:20:32,093 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource,1-5ed4808ef0e6,35081,1732148430736 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:32,094 DEBUG [HMaster-EventLoopGroup-19-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '81c523d2-6ef8-4bc8-b27d-9a8b671a4708' 2024-11-21T00:20:32,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:32,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "81c523d2-6ef8-4bc8-b27d-9a8b671a4708" 2024-11-21T00:20:32,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@723f3932, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:32,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37261,-1] 2024-11-21T00:20:32,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:32,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:32,096 INFO [HMaster-EventLoopGroup-19-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59058, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.9 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:32,097 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource,1-5ed4808ef0e6,35081,1732148430736 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@572ee6a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:32,097 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource,1-5ed4808ef0e6,35081,1732148430736 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:32,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:32,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2244e17 2024-11-21T00:20:32,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:32,100 INFO [HMaster-EventLoopGroup-19-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59066, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.9 (auth:SIMPLE), service=MasterService 2024-11-21T00:20:32,100 INFO [RS:0;5ed4808ef0e6:35081.replicationSource,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,35081,1732148430736 (queues=0) is replicating from cluster=212ecdbe-1c03-48c4-b613-45677eb0d5d7 to 
cluster=81c523d2-6ef8-4bc8-b27d-9a8b671a4708 2024-11-21T00:20:32,193 INFO [RS:0;5ed4808ef0e6:35081 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:32,195 INFO [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C35081%2C1732148430736, suffix=, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs, maxLogs=10 2024-11-21T00:20:32,208 DEBUG [RS:0;5ed4808ef0e6:35081 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195, exclude list is [], retry=0 2024-11-21T00:20:32,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:32,212 INFO [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 2024-11-21T00:20:32,212 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.ReplicationSourceManager(789): Start tracking logs for wal group 5ed4808ef0e6%2C35081%2C1732148430736 for peer 1 2024-11-21T00:20:32,213 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C35081%2C1732148430736 2024-11-21T00:20:32,213 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,35081,1732148430736, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:20:32,213 DEBUG [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:32,213 INFO [RS:0;5ed4808ef0e6:35081.replicationSource.shipper5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C35081%2C1732148430736 2024-11-21T00:20:32,213 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195, startPosition=0, beingWritten=true 2024-11-21T00:20:32,272 DEBUG [5ed4808ef0e6:44785 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:20:32,273 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:32,274 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,35081,1732148430736, 
state=OPENING 2024-11-21T00:20:32,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:32,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:32,294 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:32,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:32,294 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:32,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35081,1732148430736}] 2024-11-21T00:20:32,418 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:32,447 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:20:32,448 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50881, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:20:32,452 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:20:32,452 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:32,452 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:20:32,453 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C35081%2C1732148430736.meta, suffix=.meta, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs, maxLogs=10 2024-11-21T00:20:32,471 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.meta.1732148432454.meta, exclude list is [], retry=0 2024-11-21T00:20:32,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:32,476 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.meta.1732148432454.meta 2024-11-21T00:20:32,476 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:32,477 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:32,477 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:20:32,477 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:32,477 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:20:32,477 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-21T00:20:32,477 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:20:32,477 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:32,477 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:20:32,477 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:20:32,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:32,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:32,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:32,499 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/info/f32d6059b551438b861e8533e7b04552 2024-11-21T00:20:32,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:32,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:32,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:32,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:32,509 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/ns/a3ce47a20f0e4079b5056bd104bb2951 2024-11-21T00:20:32,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:32,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:32,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:32,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:32,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:32,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:32,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
2024-11-21T00:20:32,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:32,525 DEBUG [StoreOpener-1588230740-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/table/68a268b5ccce4159af07f0996c528430 2024-11-21T00:20:32,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:32,525 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:32,526 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740 2024-11-21T00:20:32,527 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740 2024-11-21T00:20:32,528 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:32,528 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:32,529 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:20:32,530 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:32,531 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=15; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71424242, jitterRate=0.06430414319038391}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:32,531 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:20:32,531 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148432478Writing region info on filesystem at 1732148432478Initializing all the Stores at 1732148432479 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148432479Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148432479Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148432479Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148432479Cleaning up temporary data from old regions at 1732148432528 (+49 ms)Running coprocessor post-open hooks at 1732148432531 (+3 ms)Region opened successfully at 1732148432531 2024-11-21T00:20:32,532 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=11, masterSystemTime=1732148432446 2024-11-21T00:20:32,534 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:20:32,534 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=11}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:20:32,535 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=15, regionLocation=5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:32,535 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,35081,1732148430736, state=OPEN 2024-11-21T00:20:32,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:32,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-586781601/meta-region-server 2024-11-21T00:20:32,547 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=11, ppid=10, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:32,547 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:32,547 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-586781601/meta-region-server: CHANGED 2024-11-21T00:20:32,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-21T00:20:32,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35081,1732148430736 in 253 msec 2024-11-21T00:20:32,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:20:32,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 679 msec 2024-11-21T00:20:32,730 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:33,137 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:33,478 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=1608ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=1508ms 2024-11-21T00:20:33,655 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:34,260 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:34,968 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:34,981 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(883): Waiting on regionserver count=1; waited=3111ms, expecting min=1 server(s), max=NO_LIMIT server(s), timeout=4500ms, lastChange=3011ms 2024-11-21T00:20:35,772 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:36,383 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=4513ms, expected min=1 server(s), max=NO_LIMIT server(s), master is running 2024-11-21T00:20:36,383 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-21T00:20:36,384 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:36,384 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35081,1732148430736, seqNum=-1] 2024-11-21T00:20:36,384 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:36,385 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-22-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56967, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:36,388 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.RegionStateStore(171): Load hbase:meta entry region=be07aaacfffca5950a9623d74f366e3c, regionState=OPEN, lastHost=5ed4808ef0e6,41953,1732148416380, regionLocation=5ed4808ef0e6,41953,1732148416380, openSeqNum=2 2024-11-21T00:20:36,388 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:20:36,388 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148496388 2024-11-21T00:20:36,388 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148556388 2024-11-21T00:20:36,388 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 5 msec 2024-11-21T00:20:36,389 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(207): 5ed4808ef0e6,41953,1732148416380 had 2 regions 2024-11-21T00:20:36,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44785,1732148430662-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:36,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44785,1732148430662-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:36,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44785,1732148430662-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:36,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:44785, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:36,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:36,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:36,390 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=9, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 5ed4808ef0e6,41953,1732148416380, splitWal=true, meta=true, isMeta: false 2024-11-21T00:20:36,391 INFO [PEWorker-3 {}] master.SplitLogManager(171): hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380-splitting dir is empty, no logs to split. 2024-11-21T00:20:36,391 INFO [PEWorker-3 {}] master.SplitWALManager(105): 5ed4808ef0e6,41953,1732148416380 WAL count=0, meta=false 2024-11-21T00:20:36,394 INFO [PEWorker-3 {}] master.SplitLogManager(171): hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,41953,1732148416380-splitting dir is empty, no logs to split. 2024-11-21T00:20:36,394 INFO [PEWorker-3 {}] master.SplitWALManager(105): 5ed4808ef0e6,41953,1732148416380 WAL count=0, meta=false 2024-11-21T00:20:36,394 DEBUG [PEWorker-3 {}] procedure.ServerCrashProcedure(329): Check if 5ed4808ef0e6,41953,1732148416380 WAL splitting is done? wals=0, meta=false 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 5.563sec 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44785,1732148430662-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:36,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44785,1732148430662-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:20:36,399 WARN [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(177): unknown_server=5ed4808ef0e6,41953,1732148416380/hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
2024-11-21T00:20:36,399 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN}] 2024-11-21T00:20:36,400 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN 2024-11-21T00:20:36,401 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-21T00:20:36,402 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:20:36,402 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:20:36,402 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44785,1732148430662-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:36,486 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@765c3383, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:36,487 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,44785,-1 for getting cluster id 2024-11-21T00:20:36,487 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:36,488 DEBUG [HMaster-EventLoopGroup-21-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '212ecdbe-1c03-48c4-b613-45677eb0d5d7' 2024-11-21T00:20:36,488 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:36,488 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "212ecdbe-1c03-48c4-b613-45677eb0d5d7" 2024-11-21T00:20:36,488 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1934f0aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:36,488 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,44785,-1] 2024-11-21T00:20:36,489 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:36,489 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,490 INFO [HMaster-EventLoopGroup-21-1 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:44034, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:36,491 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57315ebc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:36,491 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:36,492 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35081,1732148430736, seqNum=-1] 2024-11-21T00:20:36,493 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:36,494 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-22-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50850, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:36,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(995): HBase has been restarted 2024-11-21T00:20:36,496 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:36,496 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.HBaseTestingUtil.restartHBaseCluster(HBaseTestingUtil.java:998) at org.apache.hadoop.hbase.HBaseTestingUtil.restartHBaseCluster(HBaseTestingUtil.java:978) at org.apache.hadoop.hbase.HBaseTestingUtil.restartHBaseCluster(HBaseTestingUtil.java:971) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:539) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:36,496 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,497 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:36,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2549): Invalidated connection. Updating master addresses before: 5ed4808ef0e6:44785 after: 5ed4808ef0e6:44785 2024-11-21T00:20:36,500 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@383c3a27, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:36,500 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,44785,-1 for getting cluster id 2024-11-21T00:20:36,501 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:36,501 DEBUG [HMaster-EventLoopGroup-21-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '212ecdbe-1c03-48c4-b613-45677eb0d5d7' 2024-11-21T00:20:36,502 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:36,502 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "212ecdbe-1c03-48c4-b613-45677eb0d5d7" 2024-11-21T00:20:36,502 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@794f31da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:36,502 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,44785,-1] 2024-11-21T00:20:36,502 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:36,502 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,503 INFO [HMaster-EventLoopGroup-21-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44064, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:36,504 
DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38379009, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:36,504 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:36,505 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:36,505 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@30e7b8db 2024-11-21T00:20:36,505 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:36,506 INFO [HMaster-EventLoopGroup-21-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44076, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:36,506 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44785 {}] master.HMaster(4002): Client=jenkins//172.17.0.2 get replication peer config, id=1 2024-11-21T00:20:36,507 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:20:36,507 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:36,507 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:546) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:36,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,507 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:36,507 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:20:36,508 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1557967890, stopped=false 2024-11-21T00:20:36,508 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,37261,1732148421757 2024-11-21T00:20:36,551 DEBUG [5ed4808ef0e6:44785 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:20:36,552 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=be07aaacfffca5950a9623d74f366e3c, regionState=OPENING, regionLocation=5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:36,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN because future has completed 2024-11-21T00:20:36,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure be07aaacfffca5950a9623d74f366e3c, server=5ed4808ef0e6,35081,1732148430736}] 2024-11-21T00:20:36,676 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:36,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/1-1330347467/running 2024-11-21T00:20:36,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1330347467/running 2024-11-21T00:20:36,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:36,696 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:36,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:36,696 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:36,696 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:546) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:36,696 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,697 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,46081,1732148421879' ***** 2024-11-21T00:20:36,697 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:20:36,697 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:46081. 
2024-11-21T00:20:36,697 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on znode that does not yet exist, /1-1330347467/running 2024-11-21T00:20:36,697 DEBUG [RS:0;5ed4808ef0e6:46081 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:36,697 DEBUG [RS:0;5ed4808ef0e6:46081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:20:36,697 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:20:36,698 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Set watcher on znode that does not yet exist, /1-1330347467/running 2024-11-21T00:20:36,698 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:20:36,698 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:20:36,698 DEBUG [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:36,698 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:36,698 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:36,698 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:36,698 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:36,698 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:36,698 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-21T00:20:36,711 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
2024-11-21T00:20:36,711 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:36,711 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:20:36,713 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C35081%2C1732148430736.rep, suffix=, logDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736, archiveDir=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs, maxLogs=10 2024-11-21T00:20:36,714 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740/.tmp/ns/feb324acb8474b7689ce869a4fa6a7e1 is 43, key is default/ns:d/1732148425433/Put/seqid=0 2024-11-21T00:20:36,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741835_1011 (size=5153) 2024-11-21T00:20:36,724 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740/.tmp/ns/feb324acb8474b7689ce869a4fa6a7e1 2024-11-21T00:20:36,730 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.rep.1732148436713, exclude list is [], retry=0 2024-11-21T00:20:36,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42405,DS-98472a23-70a4-484d-9047-16e1f0420ee3,DISK] 2024-11-21T00:20:36,734 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.rep.1732148436713 2024-11-21T00:20:36,736 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33805:33805)] 2024-11-21T00:20:36,736 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => be07aaacfffca5950a9623d74f366e3c, NAME => 'hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:36,737 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:20:36,737 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:36,737 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. service=MultiRowMutationService 2024-11-21T00:20:36,737 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:20:36,737 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,737 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:36,738 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,738 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,738 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:36,739 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,740 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be07aaacfffca5950a9623d74f366e3c columnFamilyName hfileref 2024-11-21T00:20:36,740 DEBUG [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:36,740 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740/.tmp/ns/feb324acb8474b7689ce869a4fa6a7e1 as hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740/ns/feb324acb8474b7689ce869a4fa6a7e1 2024-11-21T00:20:36,740 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(327): Store=be07aaacfffca5950a9623d74f366e3c/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:36,741 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,741 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be07aaacfffca5950a9623d74f366e3c columnFamilyName queue 2024-11-21T00:20:36,741 DEBUG [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:36,746 DEBUG [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/queue/9d516b4bd1a643ffae4bdcf5f4608a4c 2024-11-21T00:20:36,746 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740/ns/feb324acb8474b7689ce869a4fa6a7e1, entries=2, sequenceid=6, filesize=5.0 K 2024-11-21T00:20:36,746 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(327): Store=be07aaacfffca5950a9623d74f366e3c/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:36,746 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for 
column family sid of region be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,747 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false 2024-11-21T00:20:36,748 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be07aaacfffca5950a9623d74f366e3c columnFamilyName sid 2024-11-21T00:20:36,748 DEBUG [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:36,748 INFO [StoreOpener-be07aaacfffca5950a9623d74f366e3c-1 {}] regionserver.HStore(327): Store=be07aaacfffca5950a9623d74f366e3c/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:36,749 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,749 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,750 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,750 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,750 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,751 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-11-21T00:20:36,752 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,752 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(1114): Opened be07aaacfffca5950a9623d74f366e3c; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71073716, jitterRate=0.05908089876174927}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:20:36,753 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:36,753 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for be07aaacfffca5950a9623d74f366e3c: Running coprocessor pre-open hook at 1732148436738Writing region info on filesystem at 1732148436738Initializing all the Stores at 1732148436739 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148436739Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148436739Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148436739Cleaning up temporary data from old regions at 1732148436750 (+11 ms)Running coprocessor post-open hooks at 1732148436753 (+3 ms)Region opened successfully at 1732148436753 2024-11-21T00:20:36,754 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c., pid=13, masterSystemTime=1732148436707 2024-11-21T00:20:36,756 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:36,756 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=13}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
2024-11-21T00:20:36,757 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=be07aaacfffca5950a9623d74f366e3c, regionState=OPEN, openSeqNum=9, regionLocation=5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:36,757 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T00:20:36,758 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:36,758 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:36,758 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:36,759 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148436698Running coprocessor pre-close hooks at 1732148436698Disabling compacts and flushes for region at 1732148436698Disabling writes for close at 1732148436698Obtaining lock to block concurrent updates at 1732148436698Preparing flush snapshotting stores in 1588230740 at 1732148436698Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732148436699 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148436699Flushing 1588230740/ns: creating writer at 1732148436699Flushing 1588230740/ns: appending metadata at 1732148436714 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732148436714Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b4ef1d: reopening flushed file at 1732148436731 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 49ms, sequenceid=6, compaction requested=false at 1732148436747 (+16 ms)Writing region close event to WAL at 1732148436753 (+6 ms)Running coprocessor post-close hooks at 1732148436758 (+5 ms)Closed at 1732148436758 2024-11-21T00:20:36,759 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:36,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure be07aaacfffca5950a9623d74f366e3c, server=5ed4808ef0e6,35081,1732148430736 because future has completed 2024-11-21T00:20:36,762 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-21T00:20:36,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure be07aaacfffca5950a9623d74f366e3c, server=5ed4808ef0e6,35081,1732148430736 in 206 msec 2024-11-21T00:20:36,764 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=9 2024-11-21T00:20:36,764 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=9, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=be07aaacfffca5950a9623d74f366e3c, ASSIGN in 364 msec 2024-11-21T00:20:36,765 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=9, state=RUNNABLE:ASSIGN_REPLICATION_QUEUES_ADD_MISSING_QUEUES, hasLock=false; org.apache.hadoop.hbase.master.replication.AssignReplicationQueuesProcedure}] 2024-11-21T00:20:36,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-22-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='', locateType=CURRENT is [region=hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c., hostname=5ed4808ef0e6,35081,1732148430736, seqNum=9] 2024-11-21T00:20:36,784 DEBUG [PEWorker-2 {}] replication.AssignReplicationQueuesProcedure(120): There are 1 replication queues need to be claimed for 5ed4808ef0e6,41953,1732148416380 2024-11-21T00:20:36,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.ClaimReplicationQueueRemoteProcedure}] 2024-11-21T00:20:36,898 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,46081,1732148421879; all regions closed. 2024-11-21T00:20:36,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741834_1010 (size=1152) 2024-11-21T00:20:36,900 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879/5ed4808ef0e6%2C46081%2C1732148421879.meta.1732148425377.meta not finished, retry = 0 2024-11-21T00:20:36,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35081 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.ClaimReplicationQueueCallable, pid=15 2024-11-21T00:20:37,003 DEBUG [RS:0;5ed4808ef0e6:46081 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/oldWALs 2024-11-21T00:20:37,003 INFO [RS:0;5ed4808ef0e6:46081 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C46081%2C1732148421879.meta:.meta(num 1732148425377) 2024-11-21T00:20:37,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741832_1008 (size=93) 2024-11-21T00:20:37,014 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/WALs/5ed4808ef0e6,46081,1732148421879/5ed4808ef0e6%2C46081%2C1732148421879.1732148424461 not finished, retry = 0 2024-11-21T00:20:37,117 DEBUG [RS:0;5ed4808ef0e6:46081 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/oldWALs 2024-11-21T00:20:37,117 INFO [RS:0;5ed4808ef0e6:46081 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C46081%2C1732148421879:(num 1732148424461) 2024-11-21T00:20:37,118 DEBUG [RS:0;5ed4808ef0e6:46081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:37,118 INFO [RS:0;5ed4808ef0e6:46081 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:37,118 INFO [RS:0;5ed4808ef0e6:46081 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:37,118 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:37,118 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:37,118 INFO [RS:0;5ed4808ef0e6:46081 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46081 2024-11-21T00:20:37,119 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:20:37,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467/rs 2024-11-21T00:20:37,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1330347467/rs/5ed4808ef0e6,46081,1732148421879 2024-11-21T00:20:37,255 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:37,255 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,46081,1732148421879] 2024-11-21T00:20:37,275 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-1330347467/draining/5ed4808ef0e6,46081,1732148421879 already deleted, retry=false 2024-11-21T00:20:37,276 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,46081,1732148421879 expired; onlineServers=0 2024-11-21T00:20:37,276 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,37261,1732148421757' ***** 2024-11-21T00:20:37,276 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:20:37,276 INFO [M:0;5ed4808ef0e6:37261 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:37,276 INFO [M:0;5ed4808ef0e6:37261 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:37,276 DEBUG [M:0;5ed4808ef0e6:37261 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:20:37,276 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:20:37,276 DEBUG [M:0;5ed4808ef0e6:37261 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:20:37,276 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148424205 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148424205,5,FailOnTimeoutGroup] 2024-11-21T00:20:37,276 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148424214 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148424214,5,FailOnTimeoutGroup] 2024-11-21T00:20:37,276 INFO [M:0;5ed4808ef0e6:37261 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:37,276 INFO [M:0;5ed4808ef0e6:37261 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:37,276 DEBUG [M:0;5ed4808ef0e6:37261 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:20:37,276 INFO [M:0;5ed4808ef0e6:37261 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:20:37,276 INFO [M:0;5ed4808ef0e6:37261 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:37,276 INFO [M:0;5ed4808ef0e6:37261 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:20:37,276 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:20:37,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1330347467/master 2024-11-21T00:20:37,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1330347467 2024-11-21T00:20:37,286 DEBUG [M:0;5ed4808ef0e6:37261 {}] zookeeper.ZKUtil(347): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Unable to get data of znode /1-1330347467/master because node does not exist (not an error) 2024-11-21T00:20:37,286 WARN [M:0;5ed4808ef0e6:37261 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:20:37,287 INFO [M:0;5ed4808ef0e6:37261 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/.lastflushedseqids 2024-11-21T00:20:37,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741836_1012 (size=99) 2024-11-21T00:20:37,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:37,365 INFO [RS:0;5ed4808ef0e6:46081 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:37,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46081-0x1015ac3425c0004, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:37,365 INFO 
[RS:0;5ed4808ef0e6:46081 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,46081,1732148421879; zookeeper connection closed. 2024-11-21T00:20:37,366 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4643db6a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4643db6a 2024-11-21T00:20:37,366 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:20:37,682 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:37,702 INFO [M:0;5ed4808ef0e6:37261 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:20:37,702 INFO [M:0;5ed4808ef0e6:37261 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:20:37,702 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:37,702 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:37,702 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:37,702 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:37,702 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:37,703 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.62 KB heapSize=11.22 KB 2024-11-21T00:20:37,721 DEBUG [M:0;5ed4808ef0e6:37261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77334027503846ea93fad8c573ec3488 is 82, key is hbase:meta,,1/info:regioninfo/1732148425414/Put/seqid=0 2024-11-21T00:20:37,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741837_1013 (size=5672) 2024-11-21T00:20:37,979 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:38,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:38,004 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:38,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:38,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:38,011 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:38,126 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77334027503846ea93fad8c573ec3488 2024-11-21T00:20:38,143 DEBUG [M:0;5ed4808ef0e6:37261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14983cd162c34a83b38a81679eb616fe is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732148425436/Put/seqid=0 2024-11-21T00:20:38,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741838_1014 (size=5275) 2024-11-21T00:20:38,547 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14983cd162c34a83b38a81679eb616fe 2024-11-21T00:20:38,569 DEBUG [M:0;5ed4808ef0e6:37261 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fa20e1887c0247e8b6852f6ca3105a79 is 69, key is 5ed4808ef0e6,46081,1732148421879/rs:state/1732148424255/Put/seqid=0 2024-11-21T00:20:38,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741839_1015 (size=5156) 2024-11-21T00:20:38,790 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:38,973 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=28 (bloomFilter=true), to=hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fa20e1887c0247e8b6852f6ca3105a79 2024-11-21T00:20:38,979 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/77334027503846ea93fad8c573ec3488 as 
hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/77334027503846ea93fad8c573ec3488 2024-11-21T00:20:38,984 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/77334027503846ea93fad8c573ec3488, entries=8, sequenceid=28, filesize=5.5 K 2024-11-21T00:20:38,985 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/14983cd162c34a83b38a81679eb616fe as hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/14983cd162c34a83b38a81679eb616fe 2024-11-21T00:20:38,990 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/14983cd162c34a83b38a81679eb616fe, entries=3, sequenceid=28, filesize=5.2 K 2024-11-21T00:20:38,991 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fa20e1887c0247e8b6852f6ca3105a79 as hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fa20e1887c0247e8b6852f6ca3105a79 2024-11-21T00:20:38,996 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46557/user/jenkins/test-data/6595f5f9-8816-f082-15e4-1970fb6f1fa2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fa20e1887c0247e8b6852f6ca3105a79, entries=1, sequenceid=28, filesize=5.0 K 2024-11-21T00:20:38,997 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1295ms, sequenceid=28, compaction requested=false 2024-11-21T00:20:38,998 INFO [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:38,998 DEBUG [M:0;5ed4808ef0e6:37261 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148437702Disabling compacts and flushes for region at 1732148437702Disabling writes for close at 1732148437702Obtaining lock to block concurrent updates at 1732148437703 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148437703Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7802, getHeapSize=11424, getOffHeapSize=0, getCellsCount=35 at 1732148437703Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148437703Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148437704 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148437721 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148437721Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148438129 (+408 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148438143 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148438143Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148438552 (+409 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148438568 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148438568Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f328612: reopening flushed file at 1732148438978 (+410 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@289d2135: reopening flushed file at 1732148438984 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77eba5f2: reopening flushed file at 1732148438990 (+6 ms)Finished flush of dataSize ~7.62 KB/7802, heapSize ~10.92 KB/11184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1295ms, sequenceid=28, compaction requested=false at 1732148438997 (+7 ms)Writing region close event to WAL at 1732148438998 (+1 ms)Closed at 1732148438998 2024-11-21T00:20:39,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44331 is added to blk_1073741830_1006 (size=10165) 2024-11-21T00:20:39,002 INFO [M:0;5ed4808ef0e6:37261 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:20:39,002 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:20:39,002 INFO [M:0;5ed4808ef0e6:37261 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37261 2024-11-21T00:20:39,003 INFO [M:0;5ed4808ef0e6:37261 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:39,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:39,152 INFO [M:0;5ed4808ef0e6:37261 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:39,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37261-0x1015ac3425c0003, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:39,194 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6ed81ed8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:39,194 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@76ed34c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:39,194 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:39,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8bcaca2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:39,195 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a6ccddd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:39,196 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:20:39,196 WARN [BP-415794961-172.17.0.2-1732148419765 heartbeating to localhost/127.0.0.1:46557 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:20:39,196 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:20:39,196 WARN [BP-415794961-172.17.0.2-1732148419765 heartbeating to localhost/127.0.0.1:46557 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-415794961-172.17.0.2-1732148419765 (Datanode Uuid 7b4fdc52-703c-45d9-8eb2-e29b54908140) service to localhost/127.0.0.1:46557 2024-11-21T00:20:39,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/cluster_717caa8b-a902-095b-3387-8e7184815b96/data/data1/current/BP-415794961-172.17.0.2-1732148419765 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:39,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/cluster_717caa8b-a902-095b-3387-8e7184815b96/data/data2/current/BP-415794961-172.17.0.2-1732148419765 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:39,197 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:20:39,201 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7fd11360{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:39,202 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ca7e3da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:39,202 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:39,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b8c3687{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:39,202 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c2610f9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:39,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:20:39,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:20:39,212 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:20:39,212 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:546) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:39,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:39,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:39,212 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:20:39,212 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:20:39,212 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1762510743, stopped=false 2024-11-21T00:20:39,213 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,44785,1732148430662 2024-11-21T00:20:39,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:39,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/running 2024-11-21T00:20:39,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:39,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:39,233 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:39,233 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:39,234 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testBasePeerConfigsRemovalForReplicationPeer(TestMasterReplication.java:546) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:39,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:39,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:39,234 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,35081,1732148430736' ***** 2024-11-21T00:20:39,234 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:20:39,234 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Set watcher on znode that does not yet exist, /0-586781601/running 2024-11-21T00:20:39,234 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:20:39,234 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:20:39,234 INFO [RS:0;5ed4808ef0e6:35081 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:20:39,234 INFO [RS:0;5ed4808ef0e6:35081 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(3091): Received CLOSE for be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:35081. 
2024-11-21T00:20:39,235 DEBUG [RS:0;5ed4808ef0e6:35081 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:39,235 DEBUG [RS:0;5ed4808ef0e6:35081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing be07aaacfffca5950a9623d74f366e3c, disabling compactions & flushes 2024-11-21T00:20:39,235 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. after waiting 0 ms 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:20:39,235 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:20:39,235 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, be07aaacfffca5950a9623d74f366e3c=hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c.} 2024-11-21T00:20:39,235 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, be07aaacfffca5950a9623d74f366e3c 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:39,235 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:39,235 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:39,235 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=952 B heapSize=2.52 KB 2024-11-21T00:20:39,238 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/replication/be07aaacfffca5950a9623d74f366e3c/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=8 2024-11-21T00:20:39,239 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:39,239 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:39,239 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 2024-11-21T00:20:39,239 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for be07aaacfffca5950a9623d74f366e3c: Waiting for close lock at 1732148439235Running coprocessor pre-close hooks at 1732148439235Disabling compacts and flushes for region at 1732148439235Disabling writes for close at 1732148439235Writing region close event to WAL at 1732148439235Running coprocessor post-close hooks at 1732148439239 (+4 ms)Closed at 1732148439239 2024-11-21T00:20:39,239 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c. 
2024-11-21T00:20:39,240 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:39,249 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/info/c5ed565ead2b4669ad23798acb2842e5 is 147, key is hbase:replication,,1732148425573.be07aaacfffca5950a9623d74f366e3c./info:regioninfo/1732148436756/Put/seqid=0 2024-11-21T00:20:39,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741851_1027 (size=6385) 2024-11-21T00:20:39,435 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:39,636 DEBUG [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:20:39,653 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=952 B at sequenceid=19 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/info/c5ed565ead2b4669ad23798acb2842e5 2024-11-21T00:20:39,660 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/.tmp/info/c5ed565ead2b4669ad23798acb2842e5 as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/info/c5ed565ead2b4669ad23798acb2842e5 2024-11-21T00:20:39,665 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/info/c5ed565ead2b4669ad23798acb2842e5, entries=8, sequenceid=19, filesize=6.2 K 2024-11-21T00:20:39,666 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~952 B/952, heapSize ~1.76 KB/1800, currentSize=0 B/0 for 1588230740 in 431ms, sequenceid=19, compaction requested=false 2024-11-21T00:20:39,670 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/data/hbase/meta/1588230740/recovered.edits/22.seqid, newMaxSeqId=22, maxSeqId=14 2024-11-21T00:20:39,670 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:39,670 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:39,670 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:39,671 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148439235Running coprocessor 
pre-close hooks at 1732148439235Disabling compacts and flushes for region at 1732148439235Disabling writes for close at 1732148439235Obtaining lock to block concurrent updates at 1732148439235Preparing flush snapshotting stores in 1588230740 at 1732148439235Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=952, getHeapSize=2520, getOffHeapSize=0, getCellsCount=8 at 1732148439236 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148439236Flushing 1588230740/info: creating writer at 1732148439236Flushing 1588230740/info: appending metadata at 1732148439248 (+12 ms)Flushing 1588230740/info: closing flushed file at 1732148439248Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bc6e421: reopening flushed file at 1732148439659 (+411 ms)Finished flush of dataSize ~952 B/952, heapSize ~1.76 KB/1800, currentSize=0 B/0 for 1588230740 in 431ms, sequenceid=19, compaction requested=false at 1732148439666 (+7 ms)Writing region close event to WAL at 1732148439667 (+1 ms)Running coprocessor post-close hooks at 1732148439670 (+3 ms)Closed at 1732148439670 2024-11-21T00:20:39,671 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:39,836 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,35081,1732148430736; all regions closed. 2024-11-21T00:20:39,838 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.meta.1732148432454.meta not finished, retry = 0 2024-11-21T00:20:39,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741849_1025 (size=2156) 2024-11-21T00:20:39,940 DEBUG [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs 2024-11-21T00:20:39,940 INFO [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C35081%2C1732148430736.meta:.meta(num 1732148432454) 2024-11-21T00:20:39,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741850_1026 (size=796) 2024-11-21T00:20:39,942 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.rep.1732148436713 not finished, retry = 0 2024-11-21T00:20:39,994 DEBUG [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 to pos 0, reset compression=false 2024-11-21T00:20:40,035 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:20:40,035 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:20:40,045 DEBUG [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(1256): Moved 1 WAL 
file(s) to /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs 2024-11-21T00:20:40,045 INFO [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C35081%2C1732148430736.rep:(num 1732148436713) 2024-11-21T00:20:40,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741848_1024 (size=93) 2024-11-21T00:20:40,047 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/WALs/5ed4808ef0e6,35081,1732148430736/5ed4808ef0e6%2C35081%2C1732148430736.1732148432195 not finished, retry = 0 2024-11-21T00:20:40,149 DEBUG [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/oldWALs 2024-11-21T00:20:40,149 INFO [RS:0;5ed4808ef0e6:35081 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C35081%2C1732148430736:(num 1732148432195) 2024-11-21T00:20:40,149 DEBUG [RS:0;5ed4808ef0e6:35081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:40,149 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:20:40,149 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:40,149 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:40,149 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:40,149 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:20:40,149 WARN [RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0-0 {event_type=RS_CLAIM_REPLICATION_QUEUE, pid=15}] regionserver.ReplicationSourceManager(915): Interrupted while waiting before transferring a queue. 2024-11-21T00:20:40,150 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,35081,1732148430736 because: Region server is closing 2024-11-21T00:20:40,150 INFO [RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0-0 {event_type=RS_CLAIM_REPLICATION_QUEUE, pid=15}] regionserver.ReplicationSourceManager(920): Not transferring queue since we are shutting down 2024-11-21T00:20:40,150 DEBUG [RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0-0 {event_type=RS_CLAIM_REPLICATION_QUEUE, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-21T00:20:40,150 INFO [RS:0;5ed4808ef0e6:35081 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:35081. 
2024-11-21T00:20:40,150 DEBUG [RS:0;5ed4808ef0e6:35081 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:40,150 DEBUG [RS:0;5ed4808ef0e6:35081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:40,150 DEBUG [RS:0;5ed4808ef0e6:35081 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:40,150 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:40,250 WARN [RS:0;5ed4808ef0e6:35081.replicationSource.wal-reader.5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:20:40,250 WARN [RS:0;5ed4808ef0e6:35081.replicationSource.shipper5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 2024-11-21T00:20:40,250 INFO [RS:0;5ed4808ef0e6:35081 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS:0;5ed4808ef0e6:35081.replicationSource.shipper5ed4808ef0e6%2C35081%2C1732148430736,1-5ed4808ef0e6,35081,1732148430736 terminated 2024-11-21T00:20:40,251 INFO [RS:0;5ed4808ef0e6:35081 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35081 2024-11-21T00:20:40,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/rs/5ed4808ef0e6,35081,1732148430736 2024-11-21T00:20:40,262 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:40,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601/rs 2024-11-21T00:20:40,275 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,35081,1732148430736] 2024-11-21T00:20:40,286 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/draining/5ed4808ef0e6,35081,1732148430736 already deleted, retry=false 2024-11-21T00:20:40,286 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,35081,1732148430736 expired; onlineServers=0 2024-11-21T00:20:40,286 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,44785,1732148430662' ***** 2024-11-21T00:20:40,286 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:20:40,286 INFO [M:0;5ed4808ef0e6:44785 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:20:40,286 INFO [M:0;5ed4808ef0e6:44785 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:20:40,287 DEBUG [M:0;5ed4808ef0e6:44785 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:20:40,287 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:20:40,287 DEBUG [M:0;5ed4808ef0e6:44785 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:20:40,287 INFO [M:0;5ed4808ef0e6:44785 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:20:40,287 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148431869 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148431869,5,FailOnTimeoutGroup] 2024-11-21T00:20:40,287 INFO [M:0;5ed4808ef0e6:44785 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:20:40,287 DEBUG [M:0;5ed4808ef0e6:44785 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:20:40,287 INFO [M:0;5ed4808ef0e6:44785 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:20:40,287 INFO [M:0;5ed4808ef0e6:44785 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:20:40,287 INFO [M:0;5ed4808ef0e6:44785 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:20:40,287 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148431869 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148431869,5,FailOnTimeoutGroup] 2024-11-21T00:20:40,287 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:20:40,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-586781601/master 2024-11-21T00:20:40,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-586781601 2024-11-21T00:20:40,338 DEBUG [M:0;5ed4808ef0e6:44785 {}] zookeeper.RecoverableZooKeeper(212): Node /0-586781601/master already deleted, retry=false 2024-11-21T00:20:40,338 DEBUG [M:0;5ed4808ef0e6:44785 {}] master.ActiveMasterManager(353): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Failed delete of our master address node; KeeperErrorCode = NoNode for /0-586781601/master 2024-11-21T00:20:40,339 INFO [M:0;5ed4808ef0e6:44785 {}] master.ServerManager(1134): Rewriting .lastflushedseqids file at: hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/.lastflushedseqids 2024-11-21T00:20:40,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741852_1028 (size=181) 2024-11-21T00:20:40,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:40,376 INFO [RS:0;5ed4808ef0e6:35081 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:40,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35081-0x1015ac3425c0007, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:40,376 INFO [RS:0;5ed4808ef0e6:35081 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,35081,1732148430736; zookeeper connection closed. 2024-11-21T00:20:40,376 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@59113cdf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@59113cdf 2024-11-21T00:20:40,376 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:20:40,762 INFO [M:0;5ed4808ef0e6:44785 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:20:40,762 INFO [M:0;5ed4808ef0e6:44785 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:20:40,763 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:40,763 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:40,763 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:40,763 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:40,763 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:40,763 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=18.30 KB heapSize=24.09 KB 2024-11-21T00:20:40,781 DEBUG [M:0;5ed4808ef0e6:44785 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d65b792923ea4060862b4db717999ae2 is 82, key is hbase:meta,,1/info:regioninfo/1732148432534/Put/seqid=0 2024-11-21T00:20:40,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741853_1029 (size=5672) 2024-11-21T00:20:41,185 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d65b792923ea4060862b4db717999ae2 2024-11-21T00:20:41,204 DEBUG [M:0;5ed4808ef0e6:44785 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da25e80542c947a4a1c9727a99ca5a55 is 375, key is \x00\x00\x00\x00\x00\x00\x00\x0C/proc:d/1732148436763/Put/seqid=0 2024-11-21T00:20:41,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741854_1030 (size=7312) 2024-11-21T00:20:41,608 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.69 KB at sequenceid=127 (bloomFilter=true), 
to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da25e80542c947a4a1c9727a99ca5a55 2024-11-21T00:20:41,625 DEBUG [M:0;5ed4808ef0e6:44785 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4dca39d5b9fb4103a13ec06eb0098d08 is 69, key is 5ed4808ef0e6,35081,1732148430736/rs:state/1732148431926/Put/seqid=0 2024-11-21T00:20:41,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741855_1031 (size=5370) 2024-11-21T00:20:42,029 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=119 B at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4dca39d5b9fb4103a13ec06eb0098d08 2024-11-21T00:20:42,034 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4dca39d5b9fb4103a13ec06eb0098d08 2024-11-21T00:20:42,035 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d65b792923ea4060862b4db717999ae2 as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d65b792923ea4060862b4db717999ae2 2024-11-21T00:20:42,040 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d65b792923ea4060862b4db717999ae2, entries=8, sequenceid=127, filesize=5.5 K 2024-11-21T00:20:42,041 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/da25e80542c947a4a1c9727a99ca5a55 as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da25e80542c947a4a1c9727a99ca5a55 2024-11-21T00:20:42,045 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/da25e80542c947a4a1c9727a99ca5a55, entries=7, sequenceid=127, filesize=7.1 K 2024-11-21T00:20:42,046 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4dca39d5b9fb4103a13ec06eb0098d08 as hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4dca39d5b9fb4103a13ec06eb0098d08 2024-11-21T00:20:42,051 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 
4dca39d5b9fb4103a13ec06eb0098d08 2024-11-21T00:20:42,051 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34383/user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4dca39d5b9fb4103a13ec06eb0098d08, entries=2, sequenceid=127, filesize=5.2 K 2024-11-21T00:20:42,052 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.30 KB/18738, heapSize ~23.79 KB/24360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1289ms, sequenceid=127, compaction requested=false 2024-11-21T00:20:42,053 INFO [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:42,053 DEBUG [M:0;5ed4808ef0e6:44785 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148440763Disabling compacts and flushes for region at 1732148440763Disabling writes for close at 1732148440763Obtaining lock to block concurrent updates at 1732148440763Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148440763Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=18738, getHeapSize=24600, getOffHeapSize=0, getCellsCount=65 at 1732148440763Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148440764 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148440764Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148440781 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148440781Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148441190 (+409 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148441203 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148441203Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148441612 (+409 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148441625 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148441625Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24960752: reopening flushed file at 1732148442034 (+409 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e41b5de: reopening flushed file at 1732148442040 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40a90964: reopening flushed file at 1732148442045 (+5 ms)Finished flush of dataSize ~18.30 KB/18738, heapSize ~23.79 KB/24360, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1289ms, sequenceid=127, compaction requested=false at 1732148442052 (+7 ms)Writing region close event to WAL at 1732148442053 (+1 ms)Closed at 1732148442053 2024-11-21T00:20:42,055 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/ed35b926-5ac3-cdb8-e1ac-2df1c0dcecf0/MasterData/WALs/5ed4808ef0e6,44785,1732148430662/5ed4808ef0e6%2C44785%2C1732148430662.1732148431301 not finished, retry = 0 2024-11-21T00:20:42,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42405 is added to blk_1073741847_1023 (size=22903) 2024-11-21T00:20:42,156 INFO [M:0;5ed4808ef0e6:44785 {}] flush.MasterFlushTableProcedureManager(90): stop: server 
shutting down. 2024-11-21T00:20:42,156 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:20:42,156 INFO [M:0;5ed4808ef0e6:44785 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44785 2024-11-21T00:20:42,156 INFO [M:0;5ed4808ef0e6:44785 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:20:42,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:42,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44785-0x1015ac3425c0006, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:20:42,377 INFO [M:0;5ed4808ef0e6:44785 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:20:42,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4dabdac5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:42,379 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6de60506{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:42,379 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:42,379 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2918976{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:42,380 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@58ef04de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:42,381 WARN [BP-1803312530-172.17.0.2-1732148414147 heartbeating to localhost/127.0.0.1:34383 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:20:42,381 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
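The records above show the master's backing region "master:store" (1595e783b53d99cd5eef43b6debb2682) being flushed during shutdown: each column family (info, proc, rs) is first written to a temporary HFile under .tmp, the temporary file is then committed into the family directory, and the region close journal records each phase with its timestamp. The same flush mechanism can also be triggered explicitly through the public Admin API; the Java sketch below is illustrative only, assumes an HBase client dependency and a reachable cluster, and uses a hypothetical table name (the master's own store above is flushed internally, not through this call).

// Minimal sketch (not from this test) of forcing a memstore flush via the Admin API.
// The table name is hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush every region of the table: memstore contents are written to
      // temporary HFiles and then committed under each column-family directory,
      // the same .tmp -> info/proc/rs commit sequence visible in the log above.
      admin.flush(TableName.valueOf("example_table"));
    }
  }
}

Tests commonly flush explicitly like this so that freshly written cells land in HFiles on HDFS rather than sitting only in the memstore.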
2024-11-21T00:20:42,381 WARN [BP-1803312530-172.17.0.2-1732148414147 heartbeating to localhost/127.0.0.1:34383 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1803312530-172.17.0.2-1732148414147 (Datanode Uuid a76a2ea5-fea7-45d7-9483-ecc9530ddbb1) service to localhost/127.0.0.1:34383 2024-11-21T00:20:42,381 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:20:42,381 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2/data/data1/current/BP-1803312530-172.17.0.2-1732148414147 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:42,382 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/cluster_c8efc952-0b7e-7831-bfe8-4027654234a2/data/data2/current/BP-1803312530-172.17.0.2-1732148414147 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:20:42,382 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:20:42,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60f47f1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:42,387 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1341e1bf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:20:42,387 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:20:42,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a802bd1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:20:42,387 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@39c4d09a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a6ca4b59-1a65-96d0-10c3-4c31346bd740/hadoop.log.dir/,STOPPED} 2024-11-21T00:20:42,393 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:20:42,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:20:42,416 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testBasePeerConfigsRemovalForReplicationPeer Thread=204 (was 159) Potentially hanging thread: LeaseRenewer:jenkins.hfs.9@localhost:34383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46557 
from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62031) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:34383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-21-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-18-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62031) 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46557 from jenkins.hfs.8 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-22-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-21-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-20-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:34383 from jenkins.hfs.9 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-22-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-18-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-20-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:34383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46557 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: HMaster-EventLoopGroup-19-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-20-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-22-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-21-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34383 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:34383 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46557 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.8@localhost:46557 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46557 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46557 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34383 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-19-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-18-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46557 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46557 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-19-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46557 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=570 (was 503) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=746 (was 886), ProcessCount=11 (was 11), AvailableMemoryMB=824 (was 662) - AvailableMemoryMB LEAK? 
- 2024-11-21T00:20:42,424 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testHFileCyclicReplication Thread=204, OpenFileDescriptor=570, MaxFileDescriptor=1048576, SystemLoadAverage=746, ProcessCount=11, AvailableMemoryMB=824 2024-11-21T00:20:42,436 INFO [Time-limited test {}] replication.TestMasterReplication(190): testHFileCyclicReplication 2024-11-21T00:20:42,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.log.dir so I do NOT create it in target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f 2024-11-21T00:20:42,437 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:20:42,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.tmp.dir so I do NOT create it in target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f 2024-11-21T00:20:42,437 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/f22fe51b-cbb2-0281-42ea-152e342d4e62/hadoop.tmp.dir Erasing configuration value by system value. 
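The long "Potentially hanging thread" listing above, together with counters such as Thread=204 (was 159) and OpenFileDescriptor=570 (was 503), comes from HBase's ResourceChecker: it snapshots thread and file-descriptor counts before and after each test (here replication.TestMasterReplication#testBasePeerConfigsRemovalForReplicationPeer) and dumps the stacks of threads that are still alive afterwards as possible leaks. The before/after diff can be approximated in plain Java; the sketch below shows only the idea and is not the HBase implementation.

// Rough, self-contained approximation of the before/after thread diff reported by
// the ResourceChecker output above; not the HBase implementation, just the idea.
import java.util.HashSet;
import java.util.Set;

public class ThreadLeakCheck {
  /** Snapshot the names of all live threads. */
  static Set<String> liveThreadNames() {
    Set<String> names = new HashSet<>();
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      names.add(t.getName());
    }
    return names;
  }

  public static void main(String[] args) {
    Set<String> before = liveThreadNames();
    // ... run the test body here ...
    Set<String> after = liveThreadNames();

    Set<String> leaked = new HashSet<>(after);
    leaked.removeAll(before); // threads that appeared during the test and are still alive
    for (String name : leaked) {
      System.out.println("Potentially hanging thread: " + name);
    }
    System.out.printf("Thread=%d (was %d)%n", after.size(), before.size());
  }
}

Most of the threads reported here (netty event loops, IPC client connections, HDFS LeaseRenewer) typically wind down on their own after shutdown, which is why the output flags them only as "potentially" hanging and as a possible LEAK rather than as a failure.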
2024-11-21T00:20:42,437 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f 2024-11-21T00:20:42,437 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd, deleteOnExit=true 2024-11-21T00:20:42,460 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/zookeeper_0, clientPort=49683, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:20:42,461 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49683 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/test.cache.data in system properties and HBase conf 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/mapreduce.cluster.temp.dir in system properties and HBase conf 
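At this point the harness has created a fresh mini-cluster data directory, started a MiniZooKeeperCluster on clientPort=49683, and logged the StartMiniClusterOption it will use for testHFileCyclicReplication (numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1). The sketch below reconstructs that setup path from the class names in the log; the builder and lifecycle method names are assumed from the HBase test API and may differ slightly between branches.

// Sketch only: bring up a minicluster roughly matching the StartMiniClusterOption
// logged above. Class and method names are assumed from the branch-3 test API.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(1)  // one MiniHBaseClusterRegionServer
        .numDataNodes(1)      // one HDFS datanode
        .numZkServers(1)      // one MiniZooKeeperCluster server
        .build();
    util.startMiniCluster(option);   // starts ZK, DFS, master and region server
    try {
      // ... test body, e.g. create tables and replication peers ...
    } finally {
      util.shutdownMiniCluster();    // logged later as "Minicluster is down"
    }
  }
}

TestMasterReplication drives more than one such cluster for cyclic replication, which is why distinct cluster0/cluster1 ZooKeeper watchers appear in the surrounding records.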
2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:20:42,461 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:20:42,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/nfs.dump.dir in system 
properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:20:42,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:20:42,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015ac3425c0002, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:20:42,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015ac3425c0005, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:20:42,491 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015ac3425c0002, quorum=127.0.0.1:62031, baseZNode=/0-586781601 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:20:42,491 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015ac3425c0005, quorum=127.0.0.1:62031, baseZNode=/1-1330347467 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:20:42,831 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:42,835 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:42,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:42,838 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:42,838 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:20:42,839 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:42,839 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@52454770{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:42,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@723c962b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:42,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f06b92f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/java.io.tmpdir/jetty-localhost-41239-hadoop-hdfs-3_4_1-tests_jar-_-any-10910533894071372726/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:42,945 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78a383c6{HTTP/1.1, (http/1.1)}{localhost:41239} 2024-11-21T00:20:42,945 INFO [Time-limited test {}] server.Server(415): Started @177494ms 2024-11-21T00:20:43,252 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:43,255 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:43,259 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:43,259 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:43,259 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:20:43,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15baf432{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:43,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f7e5bce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:43,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d48843c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/java.io.tmpdir/jetty-localhost-40451-hadoop-hdfs-3_4_1-tests_jar-_-any-2936528697128942735/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:43,362 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c8a0263{HTTP/1.1, (http/1.1)}{localhost:40451} 2024-11-21T00:20:43,362 INFO [Time-limited test {}] server.Server(415): Started @177912ms 2024-11-21T00:20:43,364 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:20:44,104 WARN [Thread-1686 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1/current/BP-913626249-172.17.0.2-1732148442480/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:44,105 WARN [Thread-1687 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2/current/BP-913626249-172.17.0.2-1732148442480/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:44,122 WARN [Thread-1674 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:20:44,125 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae51b6b1d286aae8 with lease ID 0x62d3fea1e1fe78ea: Processing first storage report for DS-d31d32eb-9dd6-4e7a-9d41-c4adf50f3925 from datanode DatanodeRegistration(127.0.0.1:45219, datanodeUuid=3e85ffda-9d86-4eae-9d00-b3b7874682a7, infoPort=35227, infoSecurePort=0, ipcPort=39975, storageInfo=lv=-57;cid=testClusterID;nsid=1958868931;c=1732148442480) 2024-11-21T00:20:44,125 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae51b6b1d286aae8 with lease ID 0x62d3fea1e1fe78ea: from storage DS-d31d32eb-9dd6-4e7a-9d41-c4adf50f3925 node DatanodeRegistration(127.0.0.1:45219, datanodeUuid=3e85ffda-9d86-4eae-9d00-b3b7874682a7, infoPort=35227, infoSecurePort=0, ipcPort=39975, storageInfo=lv=-57;cid=testClusterID;nsid=1958868931;c=1732148442480), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:44,125 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xae51b6b1d286aae8 with lease ID 0x62d3fea1e1fe78ea: Processing first storage report for DS-9cb9ca25-e187-4b71-a201-67ae6ba27d23 from datanode DatanodeRegistration(127.0.0.1:45219, datanodeUuid=3e85ffda-9d86-4eae-9d00-b3b7874682a7, infoPort=35227, infoSecurePort=0, ipcPort=39975, storageInfo=lv=-57;cid=testClusterID;nsid=1958868931;c=1732148442480) 2024-11-21T00:20:44,125 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xae51b6b1d286aae8 with lease ID 0x62d3fea1e1fe78ea: from storage DS-9cb9ca25-e187-4b71-a201-67ae6ba27d23 node DatanodeRegistration(127.0.0.1:45219, datanodeUuid=3e85ffda-9d86-4eae-9d00-b3b7874682a7, infoPort=35227, infoSecurePort=0, ipcPort=39975, storageInfo=lv=-57;cid=testClusterID;nsid=1958868931;c=1732148442480), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:44,216 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f 
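The block-report records above show the single datanode (127.0.0.1:45219) registering its two storages with the new namenode; just below, HBase writes its version file into this mini DFS ("Created version file at hdfs://localhost:34141/... with version=8"). HBase reaches the mini DFS through the standard Hadoop FileSystem API (wrapped by HFileSystem, as the getBlockLocations interception messages that follow show). The sketch below is illustrative only: the namenode address is copied from the neighbouring records and would normally come from the minicluster's Configuration, and the path is hypothetical.

// Minimal sketch of writing a file to the mini HDFS through the standard Hadoop
// FileSystem API; not taken from the test.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MiniDfsWrite {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:34141"); // address from the log; normally injected
    try (FileSystem fs = FileSystem.get(conf);
         FSDataOutputStream out = fs.create(new Path("/user/jenkins/example"))) {
      out.writeUTF("hello"); // allocates a block replica on the datanode
    }
  }
}

Finalized blocks written this way are what later surface as the "BLOCK* addStoredBlock" entries from the block report processor.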
2024-11-21T00:20:44,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:44,218 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:44,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:20:44,637 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a with version=8 2024-11-21T00:20:44,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/hbase-staging 2024-11-21T00:20:44,639 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:44,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:44,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:44,639 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:44,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:44,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:44,639 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:20:44,639 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:44,640 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35861 2024-11-21T00:20:44,640 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35861 connecting to ZooKeeper ensemble=127.0.0.1:49683 2024-11-21T00:20:44,741 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:44,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:44,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:44,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:44,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:44,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:44,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:44,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:44,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:358610x0, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:44,823 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35861-0x1015ac3b1060000 connected 2024-11-21T00:20:44,949 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:44,950 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:44,952 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on znode that does not yet exist, /0-657279644/running 2024-11-21T00:20:44,952 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a, hbase.cluster.distributed=false 2024-11-21T00:20:44,954 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on znode that does not yet exist, /0-657279644/acl 2024-11-21T00:20:44,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35861 2024-11-21T00:20:44,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35861 2024-11-21T00:20:44,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35861 2024-11-21T00:20:44,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35861 2024-11-21T00:20:44,963 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35861 2024-11-21T00:20:44,978 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:44,978 INFO [Time-limited 
test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:44,979 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:44,979 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:44,979 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:44,979 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:44,979 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:20:44,979 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:44,979 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33745 2024-11-21T00:20:44,980 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33745 connecting to ZooKeeper ensemble=127.0.0.1:49683 2024-11-21T00:20:44,981 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:44,982 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:44,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337450x0, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:44,992 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33745-0x1015ac3b1060001 connected 2024-11-21T00:20:44,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on znode that does not yet exist, /0-657279644/running 2024-11-21T00:20:44,992 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:20:44,993 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:20:44,994 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on znode that does not yet exist, /0-657279644/master 2024-11-21T00:20:44,995 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on znode that does not yet exist, /0-657279644/acl 2024-11-21T00:20:44,995 DEBUG [Time-limited test 
{}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33745 2024-11-21T00:20:44,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33745 2024-11-21T00:20:44,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33745 2024-11-21T00:20:44,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33745 2024-11-21T00:20:44,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33745 2024-11-21T00:20:45,014 DEBUG [M:0;5ed4808ef0e6:35861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:35861 2024-11-21T00:20:45,015 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0-657279644/backup-masters/5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:45,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644/backup-masters 2024-11-21T00:20:45,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644/backup-masters 2024-11-21T00:20:45,023 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on existing znode=/0-657279644/backup-masters/5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:45,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:45,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-657279644/master 2024-11-21T00:20:45,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:45,033 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on existing znode=/0-657279644/master 2024-11-21T00:20:45,034 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0-657279644/backup-masters/5ed4808ef0e6,35861,1732148444638 from backup master directory 2024-11-21T00:20:45,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0-657279644/backup-masters/5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:45,118 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644/backup-masters 2024-11-21T00:20:45,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644/backup-masters 2024-11-21T00:20:45,118 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:20:45,118 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:45,122 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/hbase.id] with ID: 00ce61ad-d457-45d3-9d65-31dda6579f89 2024-11-21T00:20:45,122 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/.tmp/hbase.id 2024-11-21T00:20:45,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:20:45,526 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/.tmp/hbase.id]:[hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/hbase.id] 2024-11-21T00:20:45,534 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:45,535 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:20:45,535 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 0ms. 
2024-11-21T00:20:45,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:45,580 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:45,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:20:45,985 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:45,986 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:20:45,987 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:45,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:20:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:20:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:20:46,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:46,393 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/data/master/store 2024-11-21T00:20:46,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:20:46,798 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:46,798 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:46,798 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:46,798 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:46,798 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:46,799 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:46,799 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:20:46,799 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148446798Disabling compacts and flushes for region at 1732148446798Disabling writes for close at 1732148446799 (+1 ms)Writing region close event to WAL at 1732148446799Closed at 1732148446799 2024-11-21T00:20:46,799 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/data/master/store/.initializing 2024-11-21T00:20:46,799 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/WALs/5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:46,800 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:46,802 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C35861%2C1732148444638, suffix=, logDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/WALs/5ed4808ef0e6,35861,1732148444638, archiveDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/oldWALs, maxLogs=10 2024-11-21T00:20:46,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/WALs/5ed4808ef0e6,35861,1732148444638/5ed4808ef0e6%2C35861%2C1732148444638.1732148446802, exclude list is [], retry=0 2024-11-21T00:20:46,816 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45219,DS-d31d32eb-9dd6-4e7a-9d41-c4adf50f3925,DISK] 2024-11-21T00:20:46,818 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/WALs/5ed4808ef0e6,35861,1732148444638/5ed4808ef0e6%2C35861%2C1732148444638.1732148446802 2024-11-21T00:20:46,818 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227)] 2024-11-21T00:20:46,818 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:46,818 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:46,818 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,818 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,819 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:20:46,820 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:46,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:46,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:20:46,822 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:46,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:46,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:20:46,823 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:46,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:46,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:20:46,824 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:46,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:46,824 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,825 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,825 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,826 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,826 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,827 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:20:46,828 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:46,831 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:46,831 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63927420, jitterRate=-0.04740720987319946}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:46,832 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148446818Initializing all the Stores at 1732148446819 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148446819Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148446819Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148446819Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148446819Cleaning up temporary data from old regions at 1732148446826 (+7 ms)Region opened successfully at 1732148446832 (+6 ms) 2024-11-21T00:20:46,832 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:20:46,834 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39b2a29a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:46,835 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:20:46,835 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:20:46,835 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:20:46,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:20:46,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:20:46,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:20:46,836 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:20:46,838 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:20:46,838 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Unable to get data of znode /0-657279644/balancer because node does not exist (not necessarily an error) 2024-11-21T00:20:46,875 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-657279644/balancer already deleted, retry=false 2024-11-21T00:20:46,875 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:20:46,876 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Unable to get data of znode /0-657279644/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:20:46,904 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-657279644/normalizer already deleted, retry=false 2024-11-21T00:20:46,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:20:46,905 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Unable to get data of znode /0-657279644/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:20:46,917 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-657279644/switch/split already deleted, retry=false 2024-11-21T00:20:46,918 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Unable to get data of znode /0-657279644/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:20:46,927 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-657279644/switch/merge already deleted, retry=false 2024-11-21T00:20:46,929 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Unable to get data of znode /0-657279644/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:20:46,938 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0-657279644/snapshot-cleanup already deleted, retry=false 2024-11-21T00:20:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-657279644/running 2024-11-21T00:20:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0-657279644/running 2024-11-21T00:20:46,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:46,949 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,35861,1732148444638, sessionid=0x1015ac3b1060000, setting cluster-up flag (Was=false) 2024-11-21T00:20:46,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:46,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:47,001 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-657279644/flush-table-proc/acquired, /0-657279644/flush-table-proc/reached, /0-657279644/flush-table-proc/abort 2024-11-21T00:20:47,002 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:47,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:47,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:47,053 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0-657279644/online-snapshot/acquired, /0-657279644/online-snapshot/reached, /0-657279644/online-snapshot/abort 2024-11-21T00:20:47,054 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:47,055 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:20:47,056 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:47,056 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:20:47,057 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of 
multiplier of cost functions = 0.0 etc. 2024-11-21T00:20:47,057 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitoring.TaskMonitor(166): Status Processing ServerCrashProcedure of 5ed4808ef0e6,41953,1732148416380: status=Processing ServerCrashProcedure of 5ed4808ef0e6,41953,1732148416380 current State SERVER_CRASH_CLAIM_REPLICATION_QUEUES, state=RUNNING, startTime=1732148431864, completionTime=-1 appears to have been leaked 2024-11-21T00:20:47,057 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,35861,1732148444638 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:47,058 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,059 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:47,059 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:20:47,060 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,060 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:20:47,064 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148477064 2024-11-21T00:20:47,064 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:20:47,064 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:20:47,064 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:20:47,064 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:20:47,064 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:20:47,064 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:20:47,065 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:47,067 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:20:47,067 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:20:47,067 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:20:47,067 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:20:47,068 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:20:47,068 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:20:47,068 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148447068,5,FailOnTimeoutGroup] 2024-11-21T00:20:47,070 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148447068,5,FailOnTimeoutGroup] 2024-11-21T00:20:47,070 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,070 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:20:47,070 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,070 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:47,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:20:47,101 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(746): ClusterId : 00ce61ad-d457-45d3-9d65-31dda6579f89 2024-11-21T00:20:47,101 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:20:47,107 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:20:47,107 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:20:47,117 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:20:47,118 DEBUG [RS:0;5ed4808ef0e6:33745 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14db015c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:47,129 DEBUG [RS:0;5ed4808ef0e6:33745 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:33745 2024-11-21T00:20:47,129 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:20:47,129 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:20:47,129 DEBUG [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:20:47,130 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,35861,1732148444638 with port=33745, startcode=1732148444978 2024-11-21T00:20:47,130 DEBUG [RS:0;5ed4808ef0e6:33745 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:20:47,131 INFO [HMaster-EventLoopGroup-23-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35339, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.10 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:20:47,132 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,132 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,133 DEBUG [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a 2024-11-21T00:20:47,133 DEBUG [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34141 2024-11-21T00:20:47,133 DEBUG [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:20:47,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644/rs 2024-11-21T00:20:47,146 DEBUG [RS:0;5ed4808ef0e6:33745 {}] zookeeper.ZKUtil(111): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Set watcher on existing znode=/0-657279644/rs/5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,146 WARN [RS:0;5ed4808ef0e6:33745 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:20:47,146 INFO [RS:0;5ed4808ef0e6:33745 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:47,146 DEBUG [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,147 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,33745,1732148444978] 2024-11-21T00:20:47,149 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:20:47,151 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:20:47,151 INFO [RS:0;5ed4808ef0e6:33745 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:20:47,151 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:47,152 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:20:47,152 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:20:47,153 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:47,153 DEBUG [RS:0;5ed4808ef0e6:33745 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:47,155 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:47,155 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,155 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,155 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,156 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,156 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33745,1732148444978-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:47,169 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:20:47,169 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33745,1732148444978-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,169 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,169 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.Replication(171): 5ed4808ef0e6,33745,1732148444978 started 2024-11-21T00:20:47,183 INFO [RS:0;5ed4808ef0e6:33745 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,183 INFO [RS:0;5ed4808ef0e6:33745 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,33745,1732148444978, RpcServer on 5ed4808ef0e6/172.17.0.2:33745, sessionid=0x1015ac3b1060001 2024-11-21T00:20:47,183 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:20:47,183 DEBUG [RS:0;5ed4808ef0e6:33745 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,183 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33745,1732148444978' 2024-11-21T00:20:47,183 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-657279644/flush-table-proc/abort' 2024-11-21T00:20:47,183 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-657279644/flush-table-proc/acquired' 2024-11-21T00:20:47,184 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:20:47,184 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:20:47,184 DEBUG [RS:0;5ed4808ef0e6:33745 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,184 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33745,1732148444978' 2024-11-21T00:20:47,184 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0-657279644/online-snapshot/abort' 
2024-11-21T00:20:47,184 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0-657279644/online-snapshot/acquired' 2024-11-21T00:20:47,184 DEBUG [RS:0;5ed4808ef0e6:33745 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:20:47,184 INFO [RS:0;5ed4808ef0e6:33745 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:20:47,184 INFO [RS:0;5ed4808ef0e6:33745 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:20:47,285 INFO [RS:0;5ed4808ef0e6:33745 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:47,287 INFO [RS:0;5ed4808ef0e6:33745 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33745%2C1732148444978, suffix=, logDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978, archiveDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/oldWALs, maxLogs=10 2024-11-21T00:20:47,303 DEBUG [RS:0;5ed4808ef0e6:33745 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287, exclude list is [], retry=0 2024-11-21T00:20:47,306 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45219,DS-d31d32eb-9dd6-4e7a-9d41-c4adf50f3925,DISK] 2024-11-21T00:20:47,307 INFO [RS:0;5ed4808ef0e6:33745 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 2024-11-21T00:20:47,308 DEBUG [RS:0;5ed4808ef0e6:33745 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227)] 2024-11-21T00:20:47,473 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:20:47,473 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS 
=> '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a 2024-11-21T00:20:47,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:20:47,479 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:47,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:47,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:47,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:47,483 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:47,484 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:47,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:47,487 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:47,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:47,488 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,489 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,489 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:47,489 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/meta/1588230740 2024-11-21T00:20:47,490 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/meta/1588230740 2024-11-21T00:20:47,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:47,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:47,491 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:20:47,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:47,494 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:47,494 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73425084, jitterRate=0.09411901235580444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:47,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148447479Initializing all the Stores at 1732148447480 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148447480Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148447480Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148447480Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148447480Cleaning up temporary data from old regions at 1732148447491 (+11 ms)Region opened successfully at 1732148447495 (+4 ms) 2024-11-21T00:20:47,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:47,495 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:47,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:47,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:47,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:47,495 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:47,496 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148447495Disabling compacts and flushes for region at 1732148447495Disabling writes for close at 1732148447495Writing region close event to WAL at 1732148447495Closed at 1732148447495 2024-11-21T00:20:47,496 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:47,496 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:20:47,497 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:20:47,497 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:47,498 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:20:47,648 DEBUG [5ed4808ef0e6:35861 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:20:47,649 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,650 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33745,1732148444978, state=OPENING 2024-11-21T00:20:47,664 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:20:47,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:47,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:47,675 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-657279644/meta-region-server: CHANGED 2024-11-21T00:20:47,675 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-657279644/meta-region-server: CHANGED 2024-11-21T00:20:47,675 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:47,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33745,1732148444978}] 2024-11-21T00:20:47,827 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:20:47,829 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60721, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:20:47,832 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:20:47,832 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:47,832 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:20:47,834 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33745%2C1732148444978.meta, suffix=.meta, logDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978, archiveDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/oldWALs, maxLogs=10 2024-11-21T00:20:47,846 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.meta.1732148447834.meta, exclude list is [], retry=0 2024-11-21T00:20:47,848 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45219,DS-d31d32eb-9dd6-4e7a-9d41-c4adf50f3925,DISK] 2024-11-21T00:20:47,850 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.meta.1732148447834.meta 2024-11-21T00:20:47,850 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new 
AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227)] 2024-11-21T00:20:47,850 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:47,850 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:20:47,850 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:47,850 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:20:47,850 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:20:47,850 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:20:47,850 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:47,851 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:20:47,851 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:20:47,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:47,852 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:47,852 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,853 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:47,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:47,853 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,853 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:47,854 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:47,854 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:47,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:47,855 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:47,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:47,855 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:47,856 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/meta/1588230740 2024-11-21T00:20:47,857 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/meta/1588230740 2024-11-21T00:20:47,858 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:47,858 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:47,858 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
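The store-opening entries above repeatedly print the column-family attributes of hbase:meta (ROW_INDEX_V1 data-block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks, 3 versions). As an illustration only, roughly the same attributes expressed through the public descriptor builders; the table name here is hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Hypothetical column family mirroring the attributes logged for the
        // 'info' family of hbase:meta above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .setMaxVersions(3)
            .build();
        // 'example' is a made-up table name for illustration.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }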
2024-11-21T00:20:47,859 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:47,860 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72608350, jitterRate=0.08194872736930847}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:47,860 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:20:47,861 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148447851Writing region info on filesystem at 1732148447851Initializing all the Stores at 1732148447851Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148447851Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148447851Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148447852 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148447852Cleaning up temporary data from old regions at 1732148447858 (+6 ms)Running coprocessor post-open hooks at 1732148447860 (+2 ms)Region opened successfully at 1732148447861 (+1 ms) 2024-11-21T00:20:47,862 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148447827 2024-11-21T00:20:47,864 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:20:47,864 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:20:47,864 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,865 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33745,1732148444978, state=OPEN 2024-11-21T00:20:47,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-657279644/meta-region-server 2024-11-21T00:20:47,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0-657279644/meta-region-server 2024-11-21T00:20:47,950 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:47,950 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-657279644/meta-region-server: CHANGED 2024-11-21T00:20:47,950 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0-657279644/meta-region-server: CHANGED 2024-11-21T00:20:47,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:20:47,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33745,1732148444978 in 275 msec 2024-11-21T00:20:47,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:20:47,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 456 msec 2024-11-21T00:20:47,956 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:47,956 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:20:47,957 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:47,957 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33745,1732148444978, seqNum=-1] 2024-11-21T00:20:47,958 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:47,959 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44335, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:47,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 907 msec 2024-11-21T00:20:47,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148447964, completionTime=-1 
2024-11-21T00:20:47,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:20:47,964 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148507967 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148567967 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35861,1732148444638-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35861,1732148444638-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35861,1732148444638-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:35861, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,967 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,968 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:47,971 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.856sec 2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
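The entries that follow show the test creating a client connection: an RPC client is built, the cluster id is fetched from the master's connection registry, and the hbase:meta location is resolved before the first ClientService call. A minimal sketch of one common way to create such a connection, using the ZooKeeper-based settings taken from the log (quorum 127.0.0.1:49683, base znode /0-657279644); the test itself resolves the registry through the master RPC endpoint, so this is an approximation, not the exact code path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientSketch {
      public static void main(String[] args) throws Exception {
        // Values copied from the log; in the test these come from the
        // mini-cluster's own Configuration rather than being set by hand.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:49683");
        conf.set("zookeeper.znode.parent", "/0-657279644");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // First use of the connection triggers the cluster-id fetch and
          // meta-location lookup seen in the surrounding log entries.
          System.out.println("meta available: "
              + admin.tableExists(TableName.META_TABLE_NAME));
        }
      }
    }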
2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35861,1732148444638-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:47,974 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35861,1732148444638-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:20:47,976 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:20:47,976 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:20:47,977 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35861,1732148444638-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:48,001 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f205866, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:48,001 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35861,-1 for getting cluster id 2024-11-21T00:20:48,001 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:48,003 DEBUG [HMaster-EventLoopGroup-23-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00ce61ad-d457-45d3-9d65-31dda6579f89' 2024-11-21T00:20:48,003 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:48,003 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00ce61ad-d457-45d3-9d65-31dda6579f89" 2024-11-21T00:20:48,003 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27b5c00d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:48,003 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35861,-1] 2024-11-21T00:20:48,004 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:48,004 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:48,005 INFO [HMaster-EventLoopGroup-23-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:48,005 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@471c1cc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:48,006 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:48,007 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33745,1732148444978, seqNum=-1] 2024-11-21T00:20:48,007 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:48,008 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41784, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:48,010 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:48,011 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:49683 2024-11-21T00:20:48,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:48,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015ac3b1060002 connected 2024-11-21T00:20:48,056 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.log.dir so I do NOT create it in target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce 2024-11-21T00:20:48,057 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.tmp.dir so I do NOT create it in target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce 2024-11-21T00:20:48,057 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/hadoop.tmp.dir Erasing configuration value by system value. 
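"Minicluster is up" above, and the StartMiniClusterOption entry below, come from the test harness bringing up a single-master, single-regionserver, single-datanode cluster. A rough sketch of that pattern, written against the long-standing HBaseTestingUtility/StartMiniClusterOption API; the test itself uses the branch-3 HBaseTestingUtil class, which is assumed here to behave analogously:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Mirrors the option values printed in the log: one master,
        // one region server, one datanode.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .build();
        util.startMiniCluster(option);   // harness logs "Minicluster is up" when ready
        try {
          // Run test logic against util.getConnection() here.
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }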
2024-11-21T00:20:48,057 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a, deleteOnExit=true 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/test.cache.data in system properties and HBase conf 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:20:48,057 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:20:48,058 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:20:48,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:20:48,059 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:20:48,491 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:48,496 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:48,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:48,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:48,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:20:48,509 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:48,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10048048{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:48,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7882f7f4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:48,640 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44c5e204{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/java.io.tmpdir/jetty-localhost-33319-hadoop-hdfs-3_4_1-tests_jar-_-any-743156854440804740/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:20:48,644 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5cf1b482{HTTP/1.1, (http/1.1)}{localhost:33319} 2024-11-21T00:20:48,644 INFO [Time-limited test {}] server.Server(415): Started @183193ms 2024-11-21T00:20:48,929 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:20:48,932 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:20:48,934 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:20:48,934 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:20:48,935 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:20:48,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ee73a3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:20:48,936 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e58daff{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:20:49,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f510359{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/java.io.tmpdir/jetty-localhost-43351-hadoop-hdfs-3_4_1-tests_jar-_-any-9289888619808002003/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:20:49,075 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b981b4e{HTTP/1.1, (http/1.1)}{localhost:43351} 2024-11-21T00:20:49,075 INFO [Time-limited test {}] server.Server(415): Started @183624ms 2024-11-21T00:20:49,076 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:20:50,064 WARN [Thread-1807 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1/current/BP-493474764-172.17.0.2-1732148448119/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:50,072 WARN [Thread-1808 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2/current/BP-493474764-172.17.0.2-1732148448119/current, will proceed with Du for space computation calculation, 2024-11-21T00:20:50,117 WARN [Thread-1795 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:20:50,119 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f127f9d0782a988 with lease ID 0xd733c756eca48c28: Processing first storage report for DS-8923e73c-8c4a-466a-8ab7-6e1ff6f891fa from datanode DatanodeRegistration(127.0.0.1:38465, datanodeUuid=322eee9e-d660-45a8-8e25-6d82f11bcba6, infoPort=35915, infoSecurePort=0, ipcPort=41981, storageInfo=lv=-57;cid=testClusterID;nsid=902739967;c=1732148448119) 2024-11-21T00:20:50,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f127f9d0782a988 with lease ID 0xd733c756eca48c28: from storage DS-8923e73c-8c4a-466a-8ab7-6e1ff6f891fa node DatanodeRegistration(127.0.0.1:38465, datanodeUuid=322eee9e-d660-45a8-8e25-6d82f11bcba6, infoPort=35915, infoSecurePort=0, ipcPort=41981, storageInfo=lv=-57;cid=testClusterID;nsid=902739967;c=1732148448119), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:50,120 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f127f9d0782a988 with lease ID 0xd733c756eca48c28: Processing first storage report for DS-503b4406-503f-4542-ac22-ddc061ff18a5 from datanode DatanodeRegistration(127.0.0.1:38465, datanodeUuid=322eee9e-d660-45a8-8e25-6d82f11bcba6, infoPort=35915, infoSecurePort=0, ipcPort=41981, storageInfo=lv=-57;cid=testClusterID;nsid=902739967;c=1732148448119) 2024-11-21T00:20:50,120 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f127f9d0782a988 with lease ID 0xd733c756eca48c28: from storage DS-503b4406-503f-4542-ac22-ddc061ff18a5 node DatanodeRegistration(127.0.0.1:38465, datanodeUuid=322eee9e-d660-45a8-8e25-6d82f11bcba6, infoPort=35915, infoSecurePort=0, ipcPort=41981, storageInfo=lv=-57;cid=testClusterID;nsid=902739967;c=1732148448119), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:20:50,140 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce 
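The last entry above shows the test utility pointing hbase.rootdir at the fresh test-data directory. A small sketch of setting the same key programmatically, assuming the public HBaseConfiguration/HConstants API; the path here is illustrative only, not the directory from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class RootDirSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // HConstants.HBASE_DIR is the "hbase.rootdir" key being set in the log above.
        conf.set(HConstants.HBASE_DIR, "file:///tmp/hbase-test-rootdir"); // illustrative path
        System.out.println(conf.get(HConstants.HBASE_DIR));
      }
    }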
2024-11-21T00:20:50,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:50,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:50,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:20:50,568 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911 with version=8 2024-11-21T00:20:50,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/hbase-staging 2024-11-21T00:20:50,571 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:50,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:50,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:50,572 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:50,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:50,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:50,572 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:20:50,572 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:50,577 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34653 2024-11-21T00:20:50,585 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34653 connecting to ZooKeeper ensemble=127.0.0.1:49683 2024-11-21T00:20:50,687 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:346530x0, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:50,687 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34653-0x1015ac3b1060003 connected 2024-11-21T00:20:50,938 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
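The last entry above shows the master process locating the cluster through the ZooKeeper ensemble at 127.0.0.1:49683. A hedged sketch of how an external client would point at the same ensemble with the public ConnectionFactory API; the quorum, port and base znode are taken from the log, while the table name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");        // ensemble host from the log
        conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 49683);      // ensemble port from the log
        conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1-1254608113"); // non-default base znode in this log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("someTable"))) { // hypothetical table
          // issue Gets/Puts against the mini cluster here
        }
      }
    }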
2024-11-21T00:20:50,940 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:50,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on znode that does not yet exist, /1-1254608113/running 2024-11-21T00:20:50,942 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911, hbase.cluster.distributed=false 2024-11-21T00:20:50,943 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on znode that does not yet exist, /1-1254608113/acl 2024-11-21T00:20:50,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34653 2024-11-21T00:20:50,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34653 2024-11-21T00:20:50,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34653 2024-11-21T00:20:50,945 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34653 2024-11-21T00:20:50,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34653 2024-11-21T00:20:50,960 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:20:50,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:50,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:50,960 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:20:50,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:20:50,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:20:50,960 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:20:50,960 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:20:50,961 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34237 2024-11-21T00:20:50,962 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:34237 connecting to ZooKeeper ensemble=127.0.0.1:49683 2024-11-21T00:20:50,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:50,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:50,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:342370x0, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:50,978 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:342370x0, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on znode that does not yet exist, /1-1254608113/running 2024-11-21T00:20:50,978 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34237-0x1015ac3b1060004 connected 2024-11-21T00:20:50,978 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:20:50,979 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:20:50,979 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on znode that does not yet exist, /1-1254608113/master 2024-11-21T00:20:50,980 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on znode that does not yet exist, /1-1254608113/acl 2024-11-21T00:20:50,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34237 2024-11-21T00:20:50,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34237 2024-11-21T00:20:50,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34237 2024-11-21T00:20:50,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34237 2024-11-21T00:20:50,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34237 2024-11-21T00:20:51,014 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:34653 2024-11-21T00:20:51,015 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-1254608113/backup-masters/5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:51,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113/backup-masters 2024-11-21T00:20:51,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/1-1254608113/backup-masters 2024-11-21T00:20:51,022 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on existing znode=/1-1254608113/backup-masters/5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:51,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:51,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1254608113/master 2024-11-21T00:20:51,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:51,038 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on existing znode=/1-1254608113/master 2024-11-21T00:20:51,038 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-1254608113/backup-masters/5ed4808ef0e6,34653,1732148450571 from backup master directory 2024-11-21T00:20:51,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113/backup-masters 2024-11-21T00:20:51,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1254608113/backup-masters/5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:51,051 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
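The watcher and backup-master entries around here, and the "Registered as active master" entry just below, rely on the standard ZooKeeper pattern of watching a znode that may not exist yet and claiming a role with an ephemeral node. A plain-ZooKeeper sketch of that pattern only, not HBase's own ActiveMasterManager code; the ensemble address and znode path are copied from the log and assume the logged cluster is running:

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = event ->
            System.out.println("event=" + event.getType() + " path=" + event.getPath());
        ZooKeeper zk = new ZooKeeper("127.0.0.1:49683", 30_000, watcher);
        // Watch a znode that does not exist yet, as ZKUtil logs above for /.../master.
        zk.exists("/1-1254608113/master", true);
        // Claim the role with an ephemeral znode; it vanishes if this session dies.
        // (Fails with NodeExistsException if the logged master already holds it.)
        zk.create("/1-1254608113/master",
            "5ed4808ef0e6,34653".getBytes(StandardCharsets.UTF_8),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        zk.close();
      }
    }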
2024-11-21T00:20:51,051 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:51,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113/backup-masters 2024-11-21T00:20:51,055 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/hbase.id] with ID: 45829647-44d5-4cc7-bba1-1ee88815a87f 2024-11-21T00:20:51,055 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/.tmp/hbase.id 2024-11-21T00:20:51,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:20:51,460 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/.tmp/hbase.id]:[hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/hbase.id] 2024-11-21T00:20:51,471 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:20:51,471 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:20:51,472 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:20:51,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:51,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:51,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:20:51,810 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:51,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:51,857 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:51,858 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:51,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:51,859 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:51,860 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:51,888 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:51,909 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:51,910 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:20:51,910 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:51,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:20:51,934 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store 2024-11-21T00:20:51,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:20:52,354 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:52,354 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:20:52,354 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:52,354 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:52,354 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:20:52,354 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:20:52,354 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
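The 'master:store' descriptor dumped above spells out each column family's settings. A sketch of how such a family is typically expressed with the public TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API, showing only the 'info' family with values copied from the dump; this is not the MasterRegion code itself, and the table name is hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Values copied from the 'info' family of the master:store dump in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setBlocksize(8192)                                    // BLOCKSIZE => 8 KB
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))                 // hypothetical table name
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }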
2024-11-21T00:20:52,354 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148452354Disabling compacts and flushes for region at 1732148452354Disabling writes for close at 1732148452354Writing region close event to WAL at 1732148452354Closed at 1732148452354 2024-11-21T00:20:52,355 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/.initializing 2024-11-21T00:20:52,355 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/WALs/5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:52,356 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:52,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C34653%2C1732148450571, suffix=, logDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/WALs/5ed4808ef0e6,34653,1732148450571, archiveDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/oldWALs, maxLogs=10 2024-11-21T00:20:52,370 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/WALs/5ed4808ef0e6,34653,1732148450571/5ed4808ef0e6%2C34653%2C1732148450571.1732148452358, exclude list is [], retry=0 2024-11-21T00:20:52,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38465,DS-8923e73c-8c4a-466a-8ab7-6e1ff6f891fa,DISK] 2024-11-21T00:20:52,374 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/WALs/5ed4808ef0e6,34653,1732148450571/5ed4808ef0e6%2C34653%2C1732148450571.1732148452358 2024-11-21T00:20:52,374 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35915:35915)] 2024-11-21T00:20:52,374 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:52,374 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:52,374 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,374 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,375 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:20:52,377 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:52,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:52,377 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:20:52,378 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:52,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:52,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:20:52,379 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:52,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:52,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:20:52,381 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:52,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:52,381 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,382 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,382 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,383 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,383 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,384 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:20:52,385 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:20:52,391 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:52,391 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70960778, jitterRate=0.0573979914188385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:52,391 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148452374Initializing all the Stores at 1732148452375 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148452375Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148452375Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148452375Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148452375Cleaning up temporary data from old regions at 1732148452383 (+8 ms)Region opened successfully at 1732148452391 (+8 ms) 2024-11-21T00:20:52,392 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:20:52,394 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@375a3990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:52,395 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:20:52,396 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:20:52,396 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:20:52,396 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:20:52,396 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:20:52,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:20:52,397 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:20:52,402 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
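A few entries below, SimpleRegionNormalizer reports updating 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1. A tiny sketch of setting that same key on a Configuration; in a real deployment it would normally live in hbase-site.xml, and the key name is taken verbatim from the log entry below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class NormalizerConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Same key the normalizer logs updating below; 1 MB is the value it settles on here.
        conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);
        System.out.println(conf.getInt("hbase.normalizer.merge.min_region_size.mb", 0));
      }
    }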
2024-11-21T00:20:52,408 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Unable to get data of znode /1-1254608113/balancer because node does not exist (not necessarily an error) 2024-11-21T00:20:52,419 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1254608113/balancer already deleted, retry=false 2024-11-21T00:20:52,420 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:20:52,421 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Unable to get data of znode /1-1254608113/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:20:52,432 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1254608113/normalizer already deleted, retry=false 2024-11-21T00:20:52,433 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:20:52,434 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Unable to get data of znode /1-1254608113/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:20:52,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1254608113/switch/split already deleted, retry=false 2024-11-21T00:20:52,444 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Unable to get data of znode /1-1254608113/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:20:52,453 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1254608113/switch/merge already deleted, retry=false 2024-11-21T00:20:52,455 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Unable to get data of znode /1-1254608113/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:20:52,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-1254608113/snapshot-cleanup already deleted, retry=false 2024-11-21T00:20:52,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1254608113/running 2024-11-21T00:20:52,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-1254608113/running 2024-11-21T00:20:52,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:52,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:52,475 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,34653,1732148450571, sessionid=0x1015ac3b1060003, setting cluster-up flag (Was=false) 2024-11-21T00:20:52,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:52,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:52,527 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1254608113/flush-table-proc/acquired, /1-1254608113/flush-table-proc/reached, /1-1254608113/flush-table-proc/abort 2024-11-21T00:20:52,528 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:52,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:52,580 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-1254608113/online-snapshot/acquired, /1-1254608113/online-snapshot/reached, /1-1254608113/online-snapshot/abort 2024-11-21T00:20:52,581 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:52,582 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:20:52,585 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:52,586 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:20:52,586 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, 
StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:20:52,586 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,34653,1732148450571 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:52,587 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148482588 2024-11-21T00:20:52,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:20:52,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:20:52,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:20:52,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:20:52,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:20:52,588 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:20:52,589 DEBUG 
[PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:52,589 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:20:52,590 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:52,590 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:20:52,592 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:52,596 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:20:52,596 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:20:52,596 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:20:52,596 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:20:52,597 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:20:52,597 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:20:52,601 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148452597,5,FailOnTimeoutGroup] 2024-11-21T00:20:52,604 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148452601,5,FailOnTimeoutGroup] 2024-11-21T00:20:52,604 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,604 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:20:52,604 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,604 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:52,606 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(746): ClusterId : 45829647-44d5-4cc7-bba1-1ee88815a87f 2024-11-21T00:20:52,606 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:20:52,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:20:52,609 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:20:52,609 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911 2024-11-21T00:20:52,620 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:20:52,620 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:20:52,633 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:20:52,633 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12eca8c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:20:52,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:20:52,648 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.ShutdownHook(81): 
Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:34237 2024-11-21T00:20:52,649 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:20:52,649 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:20:52,649 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:20:52,650 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,34653,1732148450571 with port=34237, startcode=1732148450960 2024-11-21T00:20:52,650 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:20:52,652 INFO [HMaster-EventLoopGroup-25-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37857, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:20:52,652 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34653 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:52,653 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34653 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:52,654 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911 2024-11-21T00:20:52,654 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37411 2024-11-21T00:20:52,654 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:20:52,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113/rs 2024-11-21T00:20:52,688 DEBUG [RS:0;5ed4808ef0e6:34237 {}] zookeeper.ZKUtil(111): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on existing znode=/1-1254608113/rs/5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:52,688 WARN [RS:0;5ed4808ef0e6:34237 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:20:52,689 INFO [RS:0;5ed4808ef0e6:34237 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:52,689 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:52,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,34237,1732148450960] 2024-11-21T00:20:52,701 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:20:52,704 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:20:52,712 INFO [RS:0;5ed4808ef0e6:34237 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:20:52,712 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,724 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:20:52,725 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:20:52,725 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:52,725 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,725 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,725 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:52,726 DEBUG [RS:0;5ed4808ef0e6:34237 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:20:52,731 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,731 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,731 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,731 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-21T00:20:52,731 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,731 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34237,1732148450960-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:52,749 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:20:52,750 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34237,1732148450960-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,750 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,750 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.Replication(171): 5ed4808ef0e6,34237,1732148450960 started 2024-11-21T00:20:52,767 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:52,768 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,34237,1732148450960, RpcServer on 5ed4808ef0e6/172.17.0.2:34237, sessionid=0x1015ac3b1060004 2024-11-21T00:20:52,768 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:20:52,768 DEBUG [RS:0;5ed4808ef0e6:34237 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:52,768 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,34237,1732148450960' 2024-11-21T00:20:52,768 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1254608113/flush-table-proc/abort' 2024-11-21T00:20:52,769 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1254608113/flush-table-proc/acquired' 2024-11-21T00:20:52,769 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:20:52,769 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:20:52,769 DEBUG [RS:0;5ed4808ef0e6:34237 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:52,769 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,34237,1732148450960' 2024-11-21T00:20:52,769 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-1254608113/online-snapshot/abort' 2024-11-21T00:20:52,769 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-1254608113/online-snapshot/acquired' 2024-11-21T00:20:52,770 DEBUG [RS:0;5ed4808ef0e6:34237 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:20:52,770 INFO [RS:0;5ed4808ef0e6:34237 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:20:52,770 INFO [RS:0;5ed4808ef0e6:34237 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:20:52,870 INFO [RS:0;5ed4808ef0e6:34237 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:20:52,872 INFO [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C34237%2C1732148450960, suffix=, logDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960, archiveDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/oldWALs, maxLogs=10 2024-11-21T00:20:52,894 DEBUG [RS:0;5ed4808ef0e6:34237 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873, exclude list is [], retry=0 2024-11-21T00:20:52,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38465,DS-8923e73c-8c4a-466a-8ab7-6e1ff6f891fa,DISK] 2024-11-21T00:20:52,904 INFO [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 2024-11-21T00:20:52,905 DEBUG [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35915:35915)] 2024-11-21T00:20:53,035 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:53,036 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:53,037 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:53,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,037 INFO [StoreOpener-1588230740-1 
{}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:53,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:53,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,039 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:53,040 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:53,040 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:53,041 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:53,041 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:53,043 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740 2024-11-21T00:20:53,043 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740 2024-11-21T00:20:53,044 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:53,044 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:53,045 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:20:53,045 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:53,048 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:53,049 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59545578, jitterRate=-0.11270174384117126}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:53,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148453035Initializing all the Stores at 1732148453035Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148453036 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148453036Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148453036Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148453036Cleaning up temporary data from old regions at 1732148453044 (+8 ms)Region opened successfully at 1732148453049 (+5 ms) 2024-11-21T00:20:53,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:20:53,049 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:20:53,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:20:53,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:20:53,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:20:53,049 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:20:53,049 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148453049Disabling compacts and flushes for region at 1732148453049Disabling writes for close at 1732148453049Writing region 
close event to WAL at 1732148453049Closed at 1732148453049 2024-11-21T00:20:53,050 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:53,050 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:20:53,050 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:20:53,054 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:53,055 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:20:53,150 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:20:53,205 DEBUG [5ed4808ef0e6:34653 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:20:53,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:53,207 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,34237,1732148450960, state=OPENING 2024-11-21T00:20:53,219 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:20:53,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:53,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:20:53,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1254608113/meta-region-server: CHANGED 2024-11-21T00:20:53,230 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:20:53,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,34237,1732148450960}] 2024-11-21T00:20:53,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1254608113/meta-region-server: CHANGED 2024-11-21T00:20:53,388 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=AdminService, sasl=false 2024-11-21T00:20:53,389 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49743, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:20:53,396 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:20:53,396 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:53,397 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:20:53,398 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C34237%2C1732148450960.meta, suffix=.meta, logDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960, archiveDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/oldWALs, maxLogs=10 2024-11-21T00:20:53,411 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.meta.1732148453398.meta, exclude list is [], retry=0 2024-11-21T00:20:53,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38465,DS-8923e73c-8c4a-466a-8ab7-6e1ff6f891fa,DISK] 2024-11-21T00:20:53,416 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.meta.1732148453398.meta 2024-11-21T00:20:53,416 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35915:35915)] 2024-11-21T00:20:53,416 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:53,417 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
2024-11-21T00:20:53,417 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:53,417 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:20:53,417 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:20:53,417 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:20:53,417 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:53,417 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:20:53,417 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:20:53,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:20:53,419 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:20:53,419 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,420 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:20:53,421 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:20:53,421 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,421 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:20:53,422 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:20:53,422 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:20:53,423 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:20:53,423 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,423 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:20:53,423 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:20:53,425 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740 2024-11-21T00:20:53,426 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740 2024-11-21T00:20:53,428 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:20:53,428 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:20:53,428 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:20:53,430 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:20:53,430 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66261633, jitterRate=-0.012624725699424744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:20:53,430 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:20:53,431 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148453418Writing region info on filesystem at 1732148453418Initializing all the Stores at 1732148453418Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148453418Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148453419 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148453419Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148453419Cleaning up temporary data from old regions at 1732148453428 (+9 ms)Running coprocessor post-open hooks at 1732148453430 (+2 ms)Region opened successfully at 1732148453430 2024-11-21T00:20:53,431 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148453387 2024-11-21T00:20:53,433 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:20:53,434 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:20:53,434 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:53,435 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,34237,1732148450960, state=OPEN 2024-11-21T00:20:53,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1254608113/meta-region-server 2024-11-21T00:20:53,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-1254608113/meta-region-server 2024-11-21T00:20:53,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1254608113/meta-region-server: CHANGED 2024-11-21T00:20:53,464 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:53,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-1254608113/meta-region-server: CHANGED 2024-11-21T00:20:53,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:20:53,466 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,34237,1732148450960 in 234 msec 2024-11-21T00:20:53,468 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:20:53,468 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 416 msec 2024-11-21T00:20:53,469 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:20:53,469 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:20:53,470 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:53,470 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,34237,1732148450960, seqNum=-1] 2024-11-21T00:20:53,470 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:53,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57121, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:53,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 891 msec 2024-11-21T00:20:53,476 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148453476, completionTime=-1 2024-11-21T00:20:53,476 
INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:20:53,476 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:20:53,478 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:20:53,478 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148513478 2024-11-21T00:20:53,478 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148573478 2024-11-21T00:20:53,478 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:20:53,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34653,1732148450571-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:53,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34653,1732148450571-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:53,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34653,1732148450571-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:53,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:34653, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:53,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:53,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:53,481 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:20:53,482 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.431sec 2024-11-21T00:20:53,482 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:20:53,482 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:20:53,482 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:20:53,483 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
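[Annotation] The entries above show the master finishing initialization, after which the test opens a client connection against this cluster (ZooKeeper ensemble 127.0.0.1:49683, base znode /1-1254608113, as logged). A minimal client-side sketch of that connection setup follows. It assumes a ZooKeeper-based connection registry; the test itself hands the mini cluster's Configuration straight to the client, so the explicit keys, class name, and printed output here are illustrative only, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values taken from the log: ZK ensemble 127.0.0.1:49683, base znode /1-1254608113.
    // Assumes the ZooKeeper-based registry resolves the cluster id and hbase:meta location,
    // mirroring the "Start fetching meta region location from registry" lines above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "49683");
    conf.set("zookeeper.znode.parent", "/1-1254608113");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      System.out.println("Connected; meta table = " + meta.getName());
    }
  }
}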
2024-11-21T00:20:53,483 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:20:53,483 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34653,1732148450571-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:20:53,483 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34653,1732148450571-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:20:53,485 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:20:53,485 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:20:53,485 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,34653,1732148450571-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:20:53,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fe6e69e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:53,507 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34653,-1 for getting cluster id 2024-11-21T00:20:53,507 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:53,508 DEBUG [HMaster-EventLoopGroup-25-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45829647-44d5-4cc7-bba1-1ee88815a87f' 2024-11-21T00:20:53,508 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:53,508 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45829647-44d5-4cc7-bba1-1ee88815a87f" 2024-11-21T00:20:53,509 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@292c33c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:53,509 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34653,-1] 2024-11-21T00:20:53,509 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:53,509 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:53,510 INFO [HMaster-EventLoopGroup-25-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49056, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:53,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@261561f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:53,511 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:53,512 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,34237,1732148450960, seqNum=-1] 2024-11-21T00:20:53,512 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:53,517 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39082, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:20:53,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:53,519 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:49683 2024-11-21T00:20:53,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:20:53,604 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015ac3b1060005 connected 2024-11-21T00:20:53,680 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:53,681 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:53,681 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@33190248 2024-11-21T00:20:53,682 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:53,683 INFO [HMaster-EventLoopGroup-23-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35848, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:53,683 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:53,685 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:20:53,686 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:20:53,686 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:53,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:20:53,687 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:20:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:53,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:20:53,713 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 04d0dd054c4f9e0316fac51b61606b4b, NAME => 'test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a 2024-11-21T00:20:53,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:20:53,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:54,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:54,137 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 
; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:54,137 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing 04d0dd054c4f9e0316fac51b61606b4b, disabling compactions & flushes 2024-11-21T00:20:54,137 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. 2024-11-21T00:20:54,137 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. 2024-11-21T00:20:54,137 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. after waiting 0 ms 2024-11-21T00:20:54,137 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. 2024-11-21T00:20:54,137 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. 2024-11-21T00:20:54,137 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for 04d0dd054c4f9e0316fac51b61606b4b: Waiting for close lock at 1732148454137Disabling compacts and flushes for region at 1732148454137Disabling writes for close at 1732148454137Writing region close event to WAL at 1732148454137Closed at 1732148454137 2024-11-21T00:20:54,138 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:20:54,139 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148454138"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148454138"}]},"ts":"1732148454138"} 2024-11-21T00:20:54,141 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
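[Annotation] The preceding entries are the CreateTableProcedure (pid=4) for table 'test' with column families f and f1 at REPLICATION_SCOPE => '1' and norep at REPLICATION_SCOPE => '0', ending with the new region being added to hbase:meta. A sketch of how a client could request an equivalent table through the public Admin API is shown below; the class and method names are illustrative and this is not the test's actual code.

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class CreateTestTableSketch {
  static void createTestTable(Admin admin) throws java.io.IOException {
    // Families f and f1 are replicated (scope 1); norep stays local (scope 0),
    // matching the descriptor printed in the "create 'test', ..." entry above.
    ColumnFamilyDescriptor f = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
        .setMaxVersions(1)
        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
        .build();
    ColumnFamilyDescriptor f1 = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
        .setMaxVersions(1)
        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
        .build();
    ColumnFamilyDescriptor norep = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
        .setMaxVersions(1)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        .build();
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
        .setColumnFamily(f)
        .setColumnFamily(f1)
        .setColumnFamily(norep)
        .build();
    // Server-side this request is handled by MasterRpcServices and runs as a CreateTableProcedure,
    // i.e. the pid=4 state machine traced in the log.
    admin.createTable(td);
  }
}

HConstants.REPLICATION_SCOPE_GLOBAL and REPLICATION_SCOPE_LOCAL are the constants behind the '1' and '0' scope values printed in the descriptor above.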
2024-11-21T00:20:54,141 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:20:54,142 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148454141"}]},"ts":"1732148454141"} 2024-11-21T00:20:54,143 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:20:54,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=04d0dd054c4f9e0316fac51b61606b4b, ASSIGN}] 2024-11-21T00:20:54,145 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=04d0dd054c4f9e0316fac51b61606b4b, ASSIGN 2024-11-21T00:20:54,145 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=04d0dd054c4f9e0316fac51b61606b4b, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,33745,1732148444978; forceNewPlan=false, retain=false 2024-11-21T00:20:54,296 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=04d0dd054c4f9e0316fac51b61606b4b, regionState=OPENING, regionLocation=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:54,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=04d0dd054c4f9e0316fac51b61606b4b, ASSIGN because future has completed 2024-11-21T00:20:54,299 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04d0dd054c4f9e0316fac51b61606b4b, server=5ed4808ef0e6,33745,1732148444978}] 2024-11-21T00:20:54,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:54,455 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. 2024-11-21T00:20:54,455 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 04d0dd054c4f9e0316fac51b61606b4b, NAME => 'test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:54,456 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:20:54,456 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
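[Annotation] At this point region 04d0dd054c4f9e0316fac51b61606b4b is being opened on 5ed4808ef0e6,33745 and its OPEN state is about to be written back to hbase:meta by the assignment procedures. The matching client-side step is usually to wait for the table to become available and then resolve the region location from meta, roughly as sketched below (names and the polling interval are illustrative assumptions, not taken from the test).

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

final class RegionLocationSketch {
  static void waitAndLocate(Connection connection) throws Exception {
    TableName table = TableName.valueOf("test");
    try (Admin admin = connection.getAdmin()) {
      // Poll until the ASSIGN / OpenRegionProcedure chain above has marked the region OPEN in hbase:meta.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
    }
    try (RegionLocator locator = connection.getRegionLocator(table)) {
      // Force a fresh meta lookup rather than using a cached location.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
      System.out.println("test region is on " + loc.getServerName());
    }
  }
}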
2024-11-21T00:20:54,456 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,456 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:54,456 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,456 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,457 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,458 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04d0dd054c4f9e0316fac51b61606b4b columnFamilyName f 2024-11-21T00:20:54,459 DEBUG [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:54,459 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] regionserver.HStore(327): Store=04d0dd054c4f9e0316fac51b61606b4b/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:54,459 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,460 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04d0dd054c4f9e0316fac51b61606b4b columnFamilyName f1 2024-11-21T00:20:54,460 DEBUG [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:54,460 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] regionserver.HStore(327): Store=04d0dd054c4f9e0316fac51b61606b4b/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:54,461 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,461 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04d0dd054c4f9e0316fac51b61606b4b columnFamilyName norep 2024-11-21T00:20:54,462 DEBUG [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:54,462 INFO [StoreOpener-04d0dd054c4f9e0316fac51b61606b4b-1 {}] regionserver.HStore(327): Store=04d0dd054c4f9e0316fac51b61606b4b/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:54,462 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,462 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,463 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,464 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,464 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,464 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:20:54,465 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,474 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:54,475 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 04d0dd054c4f9e0316fac51b61606b4b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64145019, jitterRate=-0.04416473209857941}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:20:54,475 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 04d0dd054c4f9e0316fac51b61606b4b 2024-11-21T00:20:54,476 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 04d0dd054c4f9e0316fac51b61606b4b: Running coprocessor pre-open hook at 1732148454456Writing region info on filesystem at 1732148454456Initializing all the Stores at 1732148454457 (+1 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148454457Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148454457Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148454457Cleaning up temporary data from old regions at 1732148454464 (+7 ms)Running coprocessor post-open hooks at 1732148454475 (+11 ms)Region opened successfully at 1732148454476 (+1 ms) 2024-11-21T00:20:54,477 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b., pid=6, 
masterSystemTime=1732148454452 2024-11-21T00:20:54,479 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. 2024-11-21T00:20:54,479 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b. 2024-11-21T00:20:54,479 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=04d0dd054c4f9e0316fac51b61606b4b, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:54,482 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04d0dd054c4f9e0316fac51b61606b4b, server=5ed4808ef0e6,33745,1732148444978 because future has completed 2024-11-21T00:20:54,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:20:54,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 04d0dd054c4f9e0316fac51b61606b4b, server=5ed4808ef0e6,33745,1732148444978 in 184 msec 2024-11-21T00:20:54,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:20:54,487 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=04d0dd054c4f9e0316fac51b61606b4b, ASSIGN in 341 msec 2024-11-21T00:20:54,487 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:20:54,488 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148454487"}]},"ts":"1732148454487"} 2024-11-21T00:20:54,490 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:20:54,491 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:20:54,492 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 808 msec 2024-11-21T00:20:54,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:54,828 INFO [RPCClient-NioEventLoopGroup-4-13 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:20:54,828 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:54,829 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:54,829 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@790cca63 2024-11-21T00:20:54,829 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:54,830 INFO [HMaster-EventLoopGroup-25-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49070, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:54,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:54,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:20:54,833 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:20:54,833 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:54,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:20:54,833 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:20:54,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:20:54,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:55,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:55,242 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 26fa993800b450213215e7fb728f4f55, NAME => 
'test,,1732148454830.26fa993800b450213215e7fb728f4f55.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911 2024-11-21T00:20:55,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:20:55,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:55,683 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148454830.26fa993800b450213215e7fb728f4f55.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:55,683 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing 26fa993800b450213215e7fb728f4f55, disabling compactions & flushes 2024-11-21T00:20:55,683 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:20:55,683 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:20:55,683 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148454830.26fa993800b450213215e7fb728f4f55. after waiting 0 ms 2024-11-21T00:20:55,683 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:20:55,683 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148454830.26fa993800b450213215e7fb728f4f55. 
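[Annotation] The second cluster (master at port 34653) is creating an identical 'test' table, the usual setup in TestMasterReplication before the clusters are wired together with a replication peer. The peer setup itself is not part of the log excerpt here; under that assumption, the sketch below only illustrates what adding such a peer looks like with the public Admin API, using cluster 1's ensemble and base znode from this log as the cluster key. The peer id and all other names are illustrative.

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

final class AddPeerSketch {
  static void linkClusters(Admin sourceClusterAdmin) throws java.io.IOException {
    // Cluster key format is "quorum:clientPort:znodeParent"; the values below are the ones
    // logged for cluster 1 (127.0.0.1:49683, /1-1254608113). Peer id "1" is an assumption.
    ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
        .setClusterKey("127.0.0.1:49683:/1-1254608113")
        .build();
    sourceClusterAdmin.addReplicationPeer("1", peer);
    // Only families with REPLICATION_SCOPE=1 (f and f1) are shipped to the peer;
    // 'norep' has scope 0 and stays local, which is what the test's column layout exercises.
  }
}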
2024-11-21T00:20:55,683 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for 26fa993800b450213215e7fb728f4f55: Waiting for close lock at 1732148455683Disabling compacts and flushes for region at 1732148455683Disabling writes for close at 1732148455683Writing region close event to WAL at 1732148455683Closed at 1732148455683 2024-11-21T00:20:55,684 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:20:55,685 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148454830.26fa993800b450213215e7fb728f4f55.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148455684"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148455684"}]},"ts":"1732148455684"} 2024-11-21T00:20:55,688 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:20:55,689 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:20:55,690 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148455690"}]},"ts":"1732148455690"} 2024-11-21T00:20:55,692 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:20:55,692 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=26fa993800b450213215e7fb728f4f55, ASSIGN}] 2024-11-21T00:20:55,694 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=26fa993800b450213215e7fb728f4f55, ASSIGN 2024-11-21T00:20:55,695 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=26fa993800b450213215e7fb728f4f55, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,34237,1732148450960; forceNewPlan=false, retain=false 2024-11-21T00:20:55,845 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=26fa993800b450213215e7fb728f4f55, regionState=OPENING, regionLocation=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:55,848 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=26fa993800b450213215e7fb728f4f55, ASSIGN because future has completed 2024-11-21T00:20:55,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26fa993800b450213215e7fb728f4f55, server=5ed4808ef0e6,34237,1732148450960}] 2024-11-21T00:20:55,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:56,004 INFO 
[RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:20:56,004 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 26fa993800b450213215e7fb728f4f55, NAME => 'test,,1732148454830.26fa993800b450213215e7fb728f4f55.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:56,005 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:20:56,005 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 2024-11-21T00:20:56,005 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,005 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148454830.26fa993800b450213215e7fb728f4f55.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:56,005 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,005 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,006 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,007 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26fa993800b450213215e7fb728f4f55 columnFamilyName f 2024-11-21T00:20:56,007 DEBUG [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:56,008 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] regionserver.HStore(327): Store=26fa993800b450213215e7fb728f4f55/f, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:56,008 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,009 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26fa993800b450213215e7fb728f4f55 columnFamilyName f1 2024-11-21T00:20:56,009 DEBUG [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:56,009 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] regionserver.HStore(327): Store=26fa993800b450213215e7fb728f4f55/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:56,009 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,010 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26fa993800b450213215e7fb728f4f55 columnFamilyName norep 2024-11-21T00:20:56,010 DEBUG [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:56,010 INFO [StoreOpener-26fa993800b450213215e7fb728f4f55-1 {}] regionserver.HStore(327): Store=26fa993800b450213215e7fb728f4f55/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:56,010 DEBUG 
[RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,011 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,011 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,012 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,012 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,013 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:20:56,014 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,017 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:56,017 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 26fa993800b450213215e7fb728f4f55; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62262295, jitterRate=-0.07221950590610504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:20:56,017 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:20:56,017 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 26fa993800b450213215e7fb728f4f55: Running coprocessor pre-open hook at 1732148456005Writing region info on filesystem at 1732148456005Initializing all the Stores at 1732148456006 (+1 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148456006Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148456006Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148456006Cleaning up temporary data from old regions at 1732148456012 (+6 ms)Running coprocessor post-open hooks at 1732148456017 (+5 ms)Region opened successfully at 1732148456017 2024-11-21T00:20:56,018 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148454830.26fa993800b450213215e7fb728f4f55., pid=6, masterSystemTime=1732148456001 2024-11-21T00:20:56,021 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:20:56,021 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:20:56,022 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=26fa993800b450213215e7fb728f4f55, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:20:56,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26fa993800b450213215e7fb728f4f55, server=5ed4808ef0e6,34237,1732148450960 because future has completed 2024-11-21T00:20:56,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:20:56,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 26fa993800b450213215e7fb728f4f55, server=5ed4808ef0e6,34237,1732148450960 in 177 msec 2024-11-21T00:20:56,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:20:56,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=26fa993800b450213215e7fb728f4f55, ASSIGN in 335 msec 2024-11-21T00:20:56,030 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:20:56,031 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148456030"}]},"ts":"1732148456030"} 2024-11-21T00:20:56,032 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:20:56,033 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:20:56,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 1.2020 sec 2024-11-21T00:20:56,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-21T00:20:56,308 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-21T00:20:56,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_test 2024-11-21T00:20:56,308 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_test Metrics about Tables on a single HBase RegionServer 2024-11-21T00:20:56,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:20:56,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver Metrics about HBase RegionObservers 2024-11-21T00:20:56,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:20:56,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter Metrics about HBase RegionObservers 2024-11-21T00:20:56,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:20:56,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-21T00:20:56,309 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-21T00:20:56,309 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-21T00:20:56,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:20:56,978 INFO [RPCClient-NioEventLoopGroup-4-14 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:20:56,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@504b4db6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,979 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35861,-1 for getting cluster id 2024-11-21T00:20:56,979 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:56,980 DEBUG [HMaster-EventLoopGroup-23-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00ce61ad-d457-45d3-9d65-31dda6579f89' 2024-11-21T00:20:56,981 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:56,981 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00ce61ad-d457-45d3-9d65-31dda6579f89" 2024-11-21T00:20:56,981 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11f7e50b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,981 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35861,-1] 2024-11-21T00:20:56,982 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:56,982 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:56,983 INFO [HMaster-EventLoopGroup-23-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35866, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:56,984 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65dcae23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,987 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ae6be8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,987 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34653,-1 for getting cluster id 2024-11-21T00:20:56,987 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:56,988 DEBUG [HMaster-EventLoopGroup-25-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45829647-44d5-4cc7-bba1-1ee88815a87f' 2024-11-21T00:20:56,988 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:56,989 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45829647-44d5-4cc7-bba1-1ee88815a87f" 2024-11-21T00:20:56,989 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@225f431c, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,989 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34653,-1] 2024-11-21T00:20:56,989 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:56,989 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:56,991 INFO [HMaster-EventLoopGroup-25-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49088, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:56,992 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e9dee00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,993 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77f5c332, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,993 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35861,-1 for getting cluster id 2024-11-21T00:20:56,993 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:56,994 DEBUG [HMaster-EventLoopGroup-23-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00ce61ad-d457-45d3-9d65-31dda6579f89' 2024-11-21T00:20:56,994 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:56,994 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00ce61ad-d457-45d3-9d65-31dda6579f89" 2024-11-21T00:20:56,994 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64f2d93a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,994 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35861,-1] 2024-11-21T00:20:56,994 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:56,995 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:56,995 INFO [HMaster-EventLoopGroup-23-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35886, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:56,996 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20cec2a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:56,996 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:56,997 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:56,997 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4a48c93b 2024-11-21T00:20:56,997 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:56,999 INFO [HMaster-EventLoopGroup-23-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35888, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:56,999 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:34653,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:20:57,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:20:57,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:57,002 DEBUG [PEWorker-1 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:34653' 2024-11-21T00:20:57,004 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4082286b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:57,004 DEBUG [PEWorker-1 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34653,-1 for getting cluster id 2024-11-21T00:20:57,005 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:57,005 DEBUG [HMaster-EventLoopGroup-25-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45829647-44d5-4cc7-bba1-1ee88815a87f' 2024-11-21T00:20:57,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:57,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45829647-44d5-4cc7-bba1-1ee88815a87f" 2024-11-21T00:20:57,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c3466de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:57,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] 
client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34653,-1] 2024-11-21T00:20:57,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:57,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:57,007 INFO [HMaster-EventLoopGroup-25-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49110, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:57,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fd6ac0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:57,008 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:57,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:57,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@21b0bc4d 2024-11-21T00:20:57,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:57,010 INFO [HMaster-EventLoopGroup-25-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49112, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:57,011 INFO [PEWorker-1 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-1. 
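The CreateTableProcedure entries earlier in this section (pid=4) created table 'test' with column families 'f' and 'f1' at REPLICATION_SCOPE '1' and 'norep' at REPLICATION_SCOPE '0', so only the first two families are eligible for replication. A minimal client-side sketch of building such a descriptor with the public HBase API (an illustration under assumed defaults, not the test's actual code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateReplicatedTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"));
      // 'f' and 'f1' are replicated (scope 1); 'norep' is not (scope 0), matching the logged descriptor.
      for (String family : new String[] {"f", "f1"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)   // VERSIONS => '1'
            .setScope(1)         // REPLICATION_SCOPE => '1'
            .build());
      }
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
          .setMaxVersions(1)
          .setScope(0)           // REPLICATION_SCOPE => '0'
          .build());
      admin.createTable(table.build()); // drives CreateTableProcedure on the master
    }
  }
}

On the master, this call runs the same CreateTableProcedure state machine recorded above (CREATE_TABLE_PRE_OPERATION through CREATE_TABLE_POST_OPERATION), with region assignment handled by the TransitRegionStateProcedure/OpenRegionProcedure subprocedures.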
2024-11-21T00:20:57,011 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:20:57,011 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:57,012 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:57,012 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
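The AddPeerProcedure above (pid=7) registers replication peer 1 with clusterKey=hbase+rpc://5ed4808ef0e6:34653, replicateAllUserTables=true, bandwidth=0 and serial=false, and the call stack shows ReplicationPeerManager.checkClusterKey validating that key before the peer is stored. A hedged sketch of the equivalent client call through the public Admin API (peer id and cluster key copied from the log entry; everything else is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cluster key mirrors the one logged for peer 1; point it at your own sink cluster.
      ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://5ed4808ef0e6:34653")
          .setReplicateAllUserTables(true)  // replicateAllUserTables=true in the logged config
          .setBandwidth(0)                  // bandwidth=0, i.e. unthrottled
          .setSerial(false)                 // serial=false in the logged config
          .build();
      admin.addReplicationPeer("1", peer, true); // third argument: register the peer ENABLED
    }
  }
}

Once the procedure finishes (pid=7 completes further below), each region server is told to refresh its peers, which is what the RefreshPeerProcedure and ReplicationSource entries later in this section show.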
2024-11-21T00:20:57,012 INFO [PEWorker-1 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:57,013 DEBUG [PEWorker-1 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:20:57,015 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:20:57,015 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:57,016 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:20:57,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:20:57,047 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ee3431ada32b00d5fdbcd917d1fe42f7, NAME => 'hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a 2024-11-21T00:20:57,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:20:57,063 DEBUG [PEWorker-1 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:20:57,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:57,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:57,468 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:57,468 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing ee3431ada32b00d5fdbcd917d1fe42f7, disabling compactions & flushes 2024-11-21T00:20:57,468 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 2024-11-21T00:20:57,468 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 2024-11-21T00:20:57,469 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. after waiting 0 ms 2024-11-21T00:20:57,469 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 2024-11-21T00:20:57,469 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 
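The hbase:replication descriptor logged above attaches the MultiRowMutationEndpoint coprocessor and carries split-restriction metadata (type DelimitedKeyPrefix, delimiter '-'). The system table is created internally by the master, but for an ordinary table the same attributes could be declared like this (a sketch with a hypothetical table name 'example'):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorWithCoprocessor {
  public static void main(String[] args) throws Exception {
    // Mirrors the table-level attributes shown in the logged hbase:replication descriptor.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example")) // hypothetical table
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setValue("hbase.regionserver.region.split_restriction.type", "DelimitedKeyPrefix")
        .setValue("hbase.regionserver.region.split_restriction.delimiter", "-")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("queue"))
        .build();
    System.out.println(td);
  }
}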
2024-11-21T00:20:57,469 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for ee3431ada32b00d5fdbcd917d1fe42f7: Waiting for close lock at 1732148457468Disabling compacts and flushes for region at 1732148457468Disabling writes for close at 1732148457469 (+1 ms)Writing region close event to WAL at 1732148457469Closed at 1732148457469 2024-11-21T00:20:57,484 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:20:57,485 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148457484"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148457484"}]},"ts":"1732148457484"} 2024-11-21T00:20:57,494 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:20:57,509 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:20:57,510 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148457509"}]},"ts":"1732148457509"} 2024-11-21T00:20:57,512 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:20:57,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=ee3431ada32b00d5fdbcd917d1fe42f7, ASSIGN}] 2024-11-21T00:20:57,514 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=ee3431ada32b00d5fdbcd917d1fe42f7, ASSIGN 2024-11-21T00:20:57,515 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=ee3431ada32b00d5fdbcd917d1fe42f7, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,33745,1732148444978; forceNewPlan=false, retain=false 2024-11-21T00:20:57,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:57,666 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=ee3431ada32b00d5fdbcd917d1fe42f7, regionState=OPENING, regionLocation=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:57,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=ee3431ada32b00d5fdbcd917d1fe42f7, ASSIGN because future has completed 2024-11-21T00:20:57,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure ee3431ada32b00d5fdbcd917d1fe42f7, server=5ed4808ef0e6,33745,1732148444978}] 2024-11-21T00:20:57,849 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 2024-11-21T00:20:57,849 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:20:57,849 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:20:57,853 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33745%2C1732148444978.rep, suffix=, logDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978, archiveDir=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/oldWALs, maxLogs=10 2024-11-21T00:20:57,875 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.rep.1732148457853, exclude list is [], retry=0 2024-11-21T00:20:57,882 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45219,DS-d31d32eb-9dd6-4e7a-9d41-c4adf50f3925,DISK] 2024-11-21T00:20:57,894 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.rep.1732148457853 2024-11-21T00:20:57,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35227:35227)] 2024-11-21T00:20:57,904 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => ee3431ada32b00d5fdbcd917d1fe42f7, NAME => 'hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:20:57,905 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
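The replication WAL group above is created with the AsyncFSWALProvider and a deliberately tiny WAL (blocksize=20 KB, rollsize=10 KB, maxLogs=10). A sketch of the configuration knobs that plausibly produce those numbers (an assumption about which properties the test harness tunes, not a transcript of its settings):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyWalConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                     // AsyncFSWALProvider, as instantiated above
    conf.setLong("hbase.regionserver.hlog.blocksize", 20 * 1024L); // blocksize = 20 KB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // rollsize = 0.5 * blocksize = 10 KB
    conf.setInt("hbase.regionserver.maxlogs", 10);                 // maxLogs = 10
    long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
        * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
    System.out.println("WAL roll size (bytes): " + rollSize);
  }
}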
2024-11-21T00:20:57,905 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:20:57,905 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. service=MultiRowMutationService 2024-11-21T00:20:57,905 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:20:57,905 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,905 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:20:57,905 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,905 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,918 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,920 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee3431ada32b00d5fdbcd917d1fe42f7 columnFamilyName hfileref 2024-11-21T00:20:57,920 DEBUG [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:57,925 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] regionserver.HStore(327): Store=ee3431ada32b00d5fdbcd917d1fe42f7/hfileref, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:57,925 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,928 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee3431ada32b00d5fdbcd917d1fe42f7 columnFamilyName queue 2024-11-21T00:20:57,928 DEBUG [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:57,928 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] regionserver.HStore(327): Store=ee3431ada32b00d5fdbcd917d1fe42f7/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:57,929 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,930 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee3431ada32b00d5fdbcd917d1fe42f7 columnFamilyName sid 2024-11-21T00:20:57,930 DEBUG [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:57,930 INFO [StoreOpener-ee3431ada32b00d5fdbcd917d1fe42f7-1 {}] regionserver.HStore(327): Store=ee3431ada32b00d5fdbcd917d1fe42f7/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:20:57,930 
DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,931 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/replication/ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,931 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/replication/ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,932 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,932 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,933 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:20:57,936 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,946 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/hbase/replication/ee3431ada32b00d5fdbcd917d1fe42f7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:20:57,947 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened ee3431ada32b00d5fdbcd917d1fe42f7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68991396, jitterRate=0.028051912784576416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:20:57,947 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ee3431ada32b00d5fdbcd917d1fe42f7 2024-11-21T00:20:57,948 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for ee3431ada32b00d5fdbcd917d1fe42f7: Running coprocessor pre-open hook at 1732148457906Writing region info on filesystem at 1732148457906Initializing all the Stores at 1732148457914 (+8 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148457914Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148457918 (+4 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148457918Cleaning up temporary data from old regions at 1732148457932 (+14 ms)Running coprocessor post-open hooks at 1732148457947 (+15 ms)Region opened successfully at 1732148457948 (+1 ms) 2024-11-21T00:20:57,952 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7., pid=10, masterSystemTime=1732148457832 2024-11-21T00:20:57,955 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 2024-11-21T00:20:57,955 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 
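With the hbase:replication region now open and the RefreshPeerProcedure below pushing peer 1 out to the region server, a client could confirm the peer's registration and state through the Admin API. A minimal sketch, assuming default connection settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;

public class ListPeers {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After AddPeerProcedure completes, peer "1" should be listed here as enabled.
      for (ReplicationPeerDescription peer : admin.listReplicationPeers()) {
        System.out.printf("peer=%s enabled=%s clusterKey=%s%n",
            peer.getPeerId(), peer.isEnabled(), peer.getPeerConfig().getClusterKey());
      }
    }
  }
}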
2024-11-21T00:20:57,956 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=ee3431ada32b00d5fdbcd917d1fe42f7, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,33745,1732148444978 2024-11-21T00:20:57,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure ee3431ada32b00d5fdbcd917d1fe42f7, server=5ed4808ef0e6,33745,1732148444978 because future has completed 2024-11-21T00:20:57,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:20:57,963 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure ee3431ada32b00d5fdbcd917d1fe42f7, server=5ed4808ef0e6,33745,1732148444978 in 285 msec 2024-11-21T00:20:57,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:20:57,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=ee3431ada32b00d5fdbcd917d1fe42f7, ASSIGN in 451 msec 2024-11-21T00:20:57,973 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:20:57,973 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148457973"}]},"ts":"1732148457973"} 2024-11-21T00:20:57,975 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:20:57,976 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:20:57,978 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 964 msec 2024-11-21T00:20:58,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2] 2024-11-21T00:20:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:58,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35861-0x1015ac3b1060000, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:58,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33745-0x1015ac3b1060001, quorum=127.0.0.1:49683, baseZNode=/0-657279644 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0-657279644 2024-11-21T00:20:58,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; 
org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:20:58,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33745 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:20:58,520 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:20:58,578 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,33745,1732148444978, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:20:58,578 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:20:58,579 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33745,1732148444978, seqNum=-1] 2024-11-21T00:20:58,579 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:20:58,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57101, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.10 (auth:SIMPLE), service=ClientService 2024-11-21T00:20:58,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,33745,1732148444978', locateType=CURRENT is [region=hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2] 2024-11-21T00:20:58,588 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:20:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:20:58,595 INFO [PEWorker-2 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,33745,1732148444978 suceeded 2024-11-21T00:20:58,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:20:58,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 238 msec 2024-11-21T00:20:58,598 INFO [PEWorker-2 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:34653,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:20:58,600 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.5990 sec 2024-11-21T00:20:58,622 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap 
servers='5ed4808ef0e6:34653' 2024-11-21T00:20:58,631 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@636ab94a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:58,631 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34653,-1 for getting cluster id 2024-11-21T00:20:58,632 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:58,632 DEBUG [HMaster-EventLoopGroup-25-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45829647-44d5-4cc7-bba1-1ee88815a87f' 2024-11-21T00:20:58,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:58,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45829647-44d5-4cc7-bba1-1ee88815a87f" 2024-11-21T00:20:58,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@66ab3b1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:58,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34653,-1] 2024-11-21T00:20:58,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:58,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:58,634 INFO [HMaster-EventLoopGroup-25-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49120, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.10 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:58,635 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3d926825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:58,635 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:58,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:58,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1e48d3e9 2024-11-21T00:20:58,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:58,637 INFO [HMaster-EventLoopGroup-25-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.10 (auth:SIMPLE), service=MasterService 2024-11-21T00:20:58,638 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,33745,1732148444978 (queues=1) is replicating from cluster=00ce61ad-d457-45d3-9d65-31dda6579f89 to cluster=45829647-44d5-4cc7-bba1-1ee88815a87f 2024-11-21T00:20:58,638 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C33745%2C1732148444978 2024-11-21T00:20:58,638 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,33745,1732148444978, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:20:58,639 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287, startPosition=0, beingWritten=true 2024-11-21T00:20:58,640 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C33745%2C1732148444978 2024-11-21T00:20:58,651 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:20:58,651 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:20:58,651 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33745,1732148444978 got entry batch from reader: WALEntryBatch [walEntries=[], 
lastWalPath=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:20:58,856 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:20:58,919 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:20:58,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:58,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:58,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:58,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:58,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:58,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:58,965 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:20:58,974 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:20:59,151 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:20:59,160 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:20:59,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:59,179 INFO [RPCClient-NioEventLoopGroup-4-5 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:20:59,179 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:20:59,179 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.setUpClusterTablesAndPeers(TestMasterReplication.java:232) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:194) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) 
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:20:59,179 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:59,179 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:59,179 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:59,180 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ccaa80b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:59,180 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34653,-1 for getting cluster id 2024-11-21T00:20:59,180 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:59,181 DEBUG [HMaster-EventLoopGroup-25-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45829647-44d5-4cc7-bba1-1ee88815a87f' 2024-11-21T00:20:59,181 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:59,181 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45829647-44d5-4cc7-bba1-1ee88815a87f" 2024-11-21T00:20:59,181 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d44f0f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:59,181 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34653,-1] 2024-11-21T00:20:59,182 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:59,182 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:59,184 INFO [HMaster-EventLoopGroup-25-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49144, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:59,185 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@606fee66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:59,186 DEBUG 
[Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:59,187 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34653,1732148450571 2024-11-21T00:20:59,187 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6a3a44bf 2024-11-21T00:20:59,190 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:59,194 INFO [HMaster-EventLoopGroup-25-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49156, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:59,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:35861,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:20:59,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:20:59,201 DEBUG [PEWorker-1 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:35861' 2024-11-21T00:20:59,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:59,202 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@792ae994, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:59,202 DEBUG [PEWorker-1 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35861,-1 for getting cluster id 2024-11-21T00:20:59,202 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:20:59,203 DEBUG [HMaster-EventLoopGroup-23-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00ce61ad-d457-45d3-9d65-31dda6579f89' 2024-11-21T00:20:59,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:20:59,203 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00ce61ad-d457-45d3-9d65-31dda6579f89" 2024-11-21T00:20:59,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7bb9008, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:59,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35861,-1] 2024-11-21T00:20:59,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:20:59,204 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:59,205 INFO [HMaster-EventLoopGroup-23-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35902, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:20:59,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a7a0b1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:20:59,206 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:20:59,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35861,1732148444638 2024-11-21T00:20:59,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5189cd95 2024-11-21T00:20:59,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:20:59,214 INFO [HMaster-EventLoopGroup-23-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35904, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:20:59,217 INFO [PEWorker-1 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-1. 
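For reference, the AddPeerProcedure activity above (pid=7, and the ADD_REPLICATION_PEER operation reported by RawAsyncHBaseAdmin) is what a client-side Admin.addReplicationPeer call triggers. The following is a minimal, illustrative Java sketch of such a call; it is not the test's own code, the peer id and URI-style cluster key are copied from the HMaster line above, and the host/port are the mini-cluster's ephemeral values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class AddPeerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Cluster key in the URI form printed by HMaster above; the host and
          // port are the test mini-cluster's values and serve as placeholders.
          ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
              .setClusterKey("hbase+rpc://5ed4808ef0e6:35861")
              .setReplicateAllUserTables(true)
              .build();
          // Submits an AddPeerProcedure on the master (pid=7 in the log above).
          admin.addReplicationPeer("1", peerConfig);
        }
      }
    }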
2024-11-21T00:20:59,217 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:20:59,217 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:59,217 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:20:59,218 INFO [PEWorker-1 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:20:59,218 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:20:59,219 DEBUG [PEWorker-1 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:20:59,220 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:20:59,220 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:20:59,222 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:20:59,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:20:59,269 DEBUG [PEWorker-1 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:20:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:59,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:20:59,565 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:20:59,677 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1aca148ecd754b87a674ca563b713328, NAME => 'hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY 
=> 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911 2024-11-21T00:20:59,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:20:59,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:21:00,074 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:21:00,091 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:21:00,091 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 1aca148ecd754b87a674ca563b713328, disabling compactions & flushes 2024-11-21T00:21:00,091 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:00,091 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:00,091 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. after waiting 0 ms 2024-11-21T00:21:00,091 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:00,091 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 
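The CreateTableProcedure and RegionOpenAndInit records above print the full descriptor for hbase:replication. Purely as an illustration (this is not the code HBase uses internally to create the system table), an equivalent descriptor could be assembled with the public client API roughly as follows; the family names, coprocessor, and split-restriction metadata are taken from the log lines above, and the remaining attributes are left at their defaults.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationTableDescriptorSketch {
      public static TableDescriptor build() throws IOException {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "replication"))
                // Coprocessor and split-restriction metadata as printed in the
                // CreateTableProcedure log line above.
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .setValue("hbase.regionserver.region.split_restriction.type", "DelimitedKeyPrefix")
                .setValue("hbase.regionserver.region.split_restriction.delimiter", "-");
        // Three single-version column families: hfileref, queue, sid.
        for (String family : new String[] { "hfileref", "queue", "sid" }) {
          ColumnFamilyDescriptor cf =
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)
                  .build();
          builder.setColumnFamily(cf);
        }
        return builder.build();
      }
    }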
2024-11-21T00:21:00,091 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1aca148ecd754b87a674ca563b713328: Waiting for close lock at 1732148460091Disabling compacts and flushes for region at 1732148460091Disabling writes for close at 1732148460091Writing region close event to WAL at 1732148460091Closed at 1732148460091 2024-11-21T00:21:00,093 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:21:00,093 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148460093"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148460093"}]},"ts":"1732148460093"} 2024-11-21T00:21:00,102 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:21:00,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:21:00,105 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148460104"}]},"ts":"1732148460104"} 2024-11-21T00:21:00,108 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:21:00,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=1aca148ecd754b87a674ca563b713328, ASSIGN}] 2024-11-21T00:21:00,113 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=1aca148ecd754b87a674ca563b713328, ASSIGN 2024-11-21T00:21:00,114 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=1aca148ecd754b87a674ca563b713328, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,34237,1732148450960; forceNewPlan=false, retain=false 2024-11-21T00:21:00,266 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1aca148ecd754b87a674ca563b713328, regionState=OPENING, regionLocation=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:21:00,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=1aca148ecd754b87a674ca563b713328, ASSIGN because future has completed 2024-11-21T00:21:00,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1aca148ecd754b87a674ca563b713328, server=5ed4808ef0e6,34237,1732148450960}] 2024-11-21T00:21:00,338 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:21:00,456 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:00,456 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:21:00,457 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:21:00,461 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C34237%2C1732148450960.rep, suffix=, logDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960, archiveDir=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/oldWALs, maxLogs=10 2024-11-21T00:21:00,480 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.rep.1732148460462, exclude list is [], retry=0 2024-11-21T00:21:00,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38465,DS-8923e73c-8c4a-466a-8ab7-6e1ff6f891fa,DISK] 2024-11-21T00:21:00,489 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.rep.1732148460462 2024-11-21T00:21:00,496 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35915:35915)] 2024-11-21T00:21:00,496 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 1aca148ecd754b87a674ca563b713328, NAME => 'hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:21:00,497 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
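The WAL lines above show AsyncFSWALProvider being instantiated with test-sized values (blocksize=20 KB, rollsize=10 KB, maxLogs=10). As a hedged configuration sketch only, the knobs that normally control those values are shown below; the tiny sizes mirror the test settings rather than production defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfigSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();
        // AsyncFSWALProvider is selected via hbase.wal.provider (the log above
        // shows it being instantiated for the region server).
        conf.set("hbase.wal.provider", "asyncfs");
        // Test-sized WAL block size; rollsize is blocksize * logroll.multiplier.
        conf.setLong("hbase.regionserver.hlog.blocksize", 20 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Matches maxLogs=10 printed in the AbstractFSWAL line above.
        conf.setInt("hbase.regionserver.maxlogs", 10);
        return conf;
      }
    }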
2024-11-21T00:21:00,497 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:21:00,497 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. service=MultiRowMutationService 2024-11-21T00:21:00,497 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:21:00,497 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,497 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:21:00,497 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,497 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,498 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,500 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1aca148ecd754b87a674ca563b713328 columnFamilyName hfileref 2024-11-21T00:21:00,500 DEBUG [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:21:00,500 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] regionserver.HStore(327): Store=1aca148ecd754b87a674ca563b713328/hfileref, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:21:00,500 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,501 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1aca148ecd754b87a674ca563b713328 columnFamilyName queue 2024-11-21T00:21:00,501 DEBUG [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:21:00,503 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] regionserver.HStore(327): Store=1aca148ecd754b87a674ca563b713328/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:21:00,503 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,504 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1aca148ecd754b87a674ca563b713328 columnFamilyName sid 2024-11-21T00:21:00,504 DEBUG [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:21:00,505 INFO [StoreOpener-1aca148ecd754b87a674ca563b713328-1 {}] regionserver.HStore(327): Store=1aca148ecd754b87a674ca563b713328/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:21:00,505 
DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,506 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,506 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,506 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:21:00,507 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:21:00,516 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 1aca148ecd754b87a674ca563b713328; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75459614, jitterRate=0.12443587183952332}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:21:00,516 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:00,516 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 1aca148ecd754b87a674ca563b713328: Running coprocessor pre-open hook at 1732148460497Writing region info on filesystem at 1732148460497Initializing all the Stores at 1732148460498 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148460498Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148460498Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148460498Cleaning up temporary data from old regions at 1732148460506 (+8 ms)Running coprocessor post-open hooks at 1732148460516 (+10 ms)Region opened successfully at 1732148460516 2024-11-21T00:21:00,518 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328., pid=10, masterSystemTime=1732148460432 2024-11-21T00:21:00,523 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:00,523 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 
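With the hfileref, queue, and sid stores opened above, replication queue state is kept in the hbase:replication table itself. Purely as an illustration, and assuming default client configuration, a scan of the queue family would look roughly like the sketch below; the row keys correspond to the '1-<server>' rows looked up by the region locator lines that follow.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationQueueScanSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("hbase", "replication"))) {
          // Scan only the 'queue' family created in the store-open lines above.
          Scan scan = new Scan().addFamily(Bytes.toBytes("queue"));
          try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result row : scanner) {
              // Rows like '1-5ed4808ef0e6,34237,1732148450960' as seen in the
              // AsyncNonMetaRegionLocator lookups below.
              System.out.println(Bytes.toString(row.getRow()));
            }
          }
        }
      }
    }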
2024-11-21T00:21:00,523 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1aca148ecd754b87a674ca563b713328, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,34237,1732148450960 2024-11-21T00:21:00,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1aca148ecd754b87a674ca563b713328, server=5ed4808ef0e6,34237,1732148450960 because future has completed 2024-11-21T00:21:00,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:21:00,536 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 1aca148ecd754b87a674ca563b713328, server=5ed4808ef0e6,34237,1732148450960 in 262 msec 2024-11-21T00:21:00,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:21:00,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=1aca148ecd754b87a674ca563b713328, ASSIGN in 428 msec 2024-11-21T00:21:00,539 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:21:00,539 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148460539"}]},"ts":"1732148460539"} 2024-11-21T00:21:00,542 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:21:00,547 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:21:00,549 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 1.3290 sec 2024-11-21T00:21:00,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2] 2024-11-21T00:21:00,702 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:21:00,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:21:00,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, 
quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:21:00,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:21:00,971 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34237 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:21:00,972 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:21:01,012 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,34237,1732148450960, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:21:01,013 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:21:01,013 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,34237,1732148450960, seqNum=-1] 2024-11-21T00:21:01,013 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:21:01,015 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44755, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=ClientService 2024-11-21T00:21:01,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,34237,1732148450960', locateType=CURRENT is [region=hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2] 2024-11-21T00:21:01,024 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:21:01,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:21:01,037 INFO [PEWorker-2 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,34237,1732148450960 suceeded 2024-11-21T00:21:01,040 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:35861' 2024-11-21T00:21:01,044 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@24e4a2ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:01,044 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35861,-1 for getting cluster id 2024-11-21T00:21:01,045 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:21:01,045 DEBUG [HMaster-EventLoopGroup-23-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00ce61ad-d457-45d3-9d65-31dda6579f89' 2024-11-21T00:21:01,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:21:01,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00ce61ad-d457-45d3-9d65-31dda6579f89" 2024-11-21T00:21:01,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1ae49e87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:01,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35861,-1] 2024-11-21T00:21:01,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:21:01,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:01,049 INFO [HMaster-EventLoopGroup-23-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35924, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:21:01,050 INFO [PEWorker-4 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:35861,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:21:01,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:21:01,050 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 219 msec 2024-11-21T00:21:01,051 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1e596a11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:01,051 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:21:01,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35861,1732148444638 2024-11-21T00:21:01,052 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@35d0666c 2024-11-21T00:21:01,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:21:01,053 INFO [HMaster-EventLoopGroup-23-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35932, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=MasterService 2024-11-21T00:21:01,057 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,34237,1732148450960 (queues=1) is replicating from cluster=45829647-44d5-4cc7-bba1-1ee88815a87f to cluster=00ce61ad-d457-45d3-9d65-31dda6579f89 2024-11-21T00:21:01,057 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C34237%2C1732148450960 2024-11-21T00:21:01,057 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,34237,1732148450960, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:21:01,060 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.shipper5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C34237%2C1732148450960 2024-11-21T00:21:01,064 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873, startPosition=0, beingWritten=true 2024-11-21T00:21:01,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.8560 sec 2024-11-21T00:21:01,077 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:21:01,077 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 379, reset compression=false 2024-11-21T00:21:01,078 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.shipper5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,34237,1732148450960 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:21:01,306 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 379, reset compression=false 2024-11-21T00:21:01,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:21:01,350 INFO [RPCClient-NioEventLoopGroup-4-8 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:21:01,352 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:21:01,352 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.setUpClusterTablesAndPeers(TestMasterReplication.java:233) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:194) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:01,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:01,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:01,354 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:21:01,356 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:21:01,363 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_0 is 36, key is aaaa/f:row/1732148461361/Put/seqid=0 2024-11-21T00:21:01,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741840_1016 (size=7894) 2024-11-21T00:21:01,393 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_1 is 34, key is ddd/f:row/1732148461393/Put/seqid=0 2024-11-21T00:21:01,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741841_1017 (size=7691) 2024-11-21T00:21:01,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@566262b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:01,411 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35861,-1 for getting cluster id 2024-11-21T00:21:01,411 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:21:01,412 DEBUG [HMaster-EventLoopGroup-23-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00ce61ad-d457-45d3-9d65-31dda6579f89' 2024-11-21T00:21:01,412 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:21:01,412 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ClusterIdFetcher$1(103): Got 
connection registry info: cluster_id: "00ce61ad-d457-45d3-9d65-31dda6579f89" 2024-11-21T00:21:01,413 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3406e996, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:01,413 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35861,-1] 2024-11-21T00:21:01,413 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:21:01,413 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:01,414 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:21:01,415 INFO [HMaster-EventLoopGroup-23-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35956, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:21:01,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@982e33, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:01,416 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:21:01,418 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33745,1732148444978, seqNum=-1] 2024-11-21T00:21:01,418 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:21:01,422 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52484, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:21:01,454 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:21:01,456 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35861,1732148444638 2024-11-21T00:21:01,456 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@27130584 2024-11-21T00:21:01,456 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:21:01,461 INFO [HMaster-EventLoopGroup-23-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35968, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 
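[Editor's note] The AddPeerProcedure entries earlier in this stretch (pid=7) record replication peer 1 being added with clusterKey=hbase+rpc://5ed4808ef0e6:35861, replicateAllUserTables=true and serial=false, after which the RS_REFRESH_PEER sources start shipping from the WALs. Below is a minimal, illustrative sketch of how such a peer is added through the public Admin API; the peer id, cluster key and flags are copied from the log lines, everything else (class name, configuration setup) is assumed boilerplate and is not the actual TestMasterReplication code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed to point at the source cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Values mirror the AddPeerProcedure log line; the hbase+rpc:// connection-URI form
      // of the cluster key is what this 3.0.0-beta-2 build reports for the peer.
      ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://5ed4808ef0e6:35861")
          .setReplicateAllUserTables(true)
          .setSerial(false)
          .build();
      admin.addReplicationPeer("1", peer); // peer is ENABLED by default
    }
  }
}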
2024-11-21T00:21:01,471 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2] 2024-11-21T00:21:01,489 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:21:01,505 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_0 first=Optional[aaaa] last=Optional[cccc] 2024-11-21T00:21:01,511 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_1 first=Optional[ddd] last=Optional[fff] 2024-11-21T00:21:01,542 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_0 for inclusion in 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:01,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(626): HFile bounds: first=aaaa last=cccc 2024-11-21T00:21:01,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:01,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_1 for inclusion in 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:01,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(626): HFile bounds: first=ddd last=fff 2024-11-21T00:21:01,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:01,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HRegion(2603): Flush status journal for 04d0dd054c4f9e0316fac51b61606b4b: 2024-11-21T00:21:01,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_0 to hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_0 2024-11-21T00:21:01,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_0 as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ 2024-11-21T00:21:01,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving 
hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01/f/hfile_1 to hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_1 2024-11-21T00:21:01,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_1 as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ 2024-11-21T00:21:01,567 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_0 into 04d0dd054c4f9e0316fac51b61606b4b/f as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ - updating store file list. 2024-11-21T00:21:01,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 738d4c1ab6b04e659280722f69f567c9_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:01,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ into 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:01,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_0 into 04d0dd054c4f9e0316fac51b61606b4b/f (new location: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_) 2024-11-21T00:21:01,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_0 2024-11-21T00:21:01,580 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_1 into 04d0dd054c4f9e0316fac51b61606b4b/f as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ - updating store file list. 
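[Editor's note] The BulkLoadHFilesTool and SecureBulkLoadManager entries above trace two HFiles (hfile_0 covering rows aaaa..cccc and hfile_1 covering ddd..fff) being staged and committed into region 04d0dd054c4f9e0316fac51b61606b4b of table 'test'. A minimal sketch of the client call that drives such a load is shown below; it assumes the directory layout visible in the log (<dir>/f/hfile_*, with "f" the column family) and uses the public BulkLoadHFiles API rather than the test's own helper method.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed to point at the cluster backed by hdfs://localhost:34141
    // Path copied from the log; it only exists inside this test run's mini-cluster.
    Path hfileDir = new Path("hdfs://localhost:34141/user/jenkins/test-data/"
        + "d8663753-0603-6b4f-a86d-c8b873d60e0a/testHFileCyclicReplication_01");
    // BulkLoadHFiles stages each HFile and asks the region server holding the target region
    // to adopt it, which is the SecureBulkLoadManager / HRegionFileSystem activity logged above.
    BulkLoadHFiles.create(conf).bulkLoad(TableName.valueOf("test"), hfileDir);
  }
}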
2024-11-21T00:21:01,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 881165bacbc44ced9c7456c352c7520e_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:01,591 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ into 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:01,591 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_1 into 04d0dd054c4f9e0316fac51b61606b4b/f (new location: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_) 2024-11-21T00:21:01,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins__test__f8mavhl5eoqu3e96t6u9r32q6ovb633c7nqjr05bgk5tmuu01dc5jtfu5276i9n5/f/hfile_1 2024-11-21T00:21:01,595 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:21:01,595 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.replication.TestMasterReplication.loadAndValidateHFileReplication(TestMasterReplication.java:720) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:205) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:01,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:01,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:01,596 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:21:01,597 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncRegionLocatorHelper(64): Try updating region=test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2 , the old value is region=test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=5ed4808ef0e6:33745 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:21:01,597 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:21:01,597 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncRegionLocatorHelper(88): Try removing region=test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2 from cache 2024-11-21T00:21:01,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 264 connection: 172.17.0.2:52484 deadline: 1732148521595 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:52484 2024-11-21T00:21:01,600 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:21:01,602 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,34237,1732148450960, seqNum=-1] 2024-11-21T00:21:01,602 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:21:01,604 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39086, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:21:01,606 DEBUG 
[RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148454830.26fa993800b450213215e7fb728f4f55., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2] 2024-11-21T00:21:01,610 INFO [Time-limited test {}] replication.TestMasterReplication(739): Waiting more time for bulkloaded data replication. 2024-11-21T00:21:01,616 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 379, reset compression=false 2024-11-21T00:21:01,709 WARN [Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'test', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:33745 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) 
~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-11-21T00:21:02,049 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 379, reset compression=false 2024-11-21T00:21:02,240 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 379, reset compression=false 2024-11-21T00:21:02,269 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:21:02,269 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33745,1732148444978 got entry batch from reader: WALEntryBatch [walEntries=[{test/04d0dd054c4f9e0316fac51b61606b4b/5=[#edits: 1 = <\x00/METAFAMILY:HBASE::BULK_LOAD/1732148461593/Put/vlen=190/seqid=0; >],8098}], lastWalPath=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287, lastWalPosition=687, nbRowKeys=1, nbHFiles=2, heapSize=8098, lastSeqIds={}, endOfFile=false,usedBufferSize=407] 2024-11-21T00:21:02,281 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:21:02,282 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60790, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.10 (auth:SIMPLE), service=AdminService 2024-11-21T00:21:02,284 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.ReplicationSink(318): Replicating [00ce61ad-d457-45d3-9d65-31dda6579f89] bulk loaded data 2024-11-21T00:21:02,288 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@5bb90f0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:02,288 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34653,-1 for getting cluster id 2024-11-21T00:21:02,288 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:21:02,289 DEBUG [HMaster-EventLoopGroup-25-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45829647-44d5-4cc7-bba1-1ee88815a87f' 2024-11-21T00:21:02,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:21:02,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45829647-44d5-4cc7-bba1-1ee88815a87f" 2024-11-21T00:21:02,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1c554263, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:02,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34653,-1] 2024-11-21T00:21:02,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:21:02,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:02,292 INFO [HMaster-EventLoopGroup-25-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38896, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:21:02,296 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@28c45330, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:02,402 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741840_1016 (size=7894) 2024-11-21T00:21:02,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741841_1017 (size=7691) 2024-11-21T00:21:02,469 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 687, reset compression=false 2024-11-21T00:21:02,570 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 379, reset compression=false 2024-11-21T00:21:02,613 INFO [Time-limited test {}] replication.TestMasterReplication(739): Waiting more time for bulkloaded data replication. 2024-11-21T00:21:02,778 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 687, reset compression=false 2024-11-21T00:21:02,843 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:21:02,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34653,1732148450571 2024-11-21T00:21:02,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5242bddb 2024-11-21T00:21:02,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:21:02,849 INFO [HMaster-EventLoopGroup-25-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38908, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=MasterService 2024-11-21T00:21:02,852 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:21:02,852 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:21:02,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,34237,1732148450960, 
seqNum=-1] 2024-11-21T00:21:02,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:21:02,855 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60792, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=ClientService 2024-11-21T00:21:02,892 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ first=Optional[aaaa] last=Optional[cccc] 2024-11-21T00:21:02,914 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ first=Optional[ddd] last=Optional[fff] 2024-11-21T00:21:02,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ for inclusion in 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:02,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(626): HFile bounds: first=ddd last=fff 2024-11-21T00:21:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:02,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ for inclusion in 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(626): HFile bounds: first=aaaa last=cccc 2024-11-21T00:21:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HRegion(2603): Flush status journal for 26fa993800b450213215e7fb728f4f55: 2024-11-21T00:21:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 881165bacbc44ced9c7456c352c7520e_SeqId_4_ is already available in staging directory. Skipping copy or rename. 
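[Editor's note] The ReplicationSink entries above show the peer cluster (port 34237) receiving the BULK_LOAD marker shipped from cluster 0 and adopting the replicated HFiles, which it finds already present in its staging directory. This path is only taken when HFile replication is enabled; the sketch below shows the two standard configuration keys that govern the feature. The keys are real HBase properties, but the values here are illustrative and were not read from this test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BulkLoadReplicationConf {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Enables shipping of bulk-loaded HFiles to replication peers.
    conf.setBoolean("hbase.replication.bulkload.enabled", true);
    // Identifies this cluster to its peers when bulk-loaded files are replicated
    // (the value "source-cluster" is an illustrative placeholder).
    conf.set("hbase.replication.cluster.id", "source-cluster");
    System.out.println(conf.get("hbase.replication.bulkload.enabled"));
  }
}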
2024-11-21T00:21:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/bf051829a1fb4570822d7844f700d8cf_SeqId_4_ 2024-11-21T00:21:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 738d4c1ab6b04e659280722f69f567c9_SeqId_4_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:21:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/262e98d9fd394e2aa812bcdd9f4b73da_SeqId_4_ 2024-11-21T00:21:02,992 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ into 26fa993800b450213215e7fb728f4f55/f as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/bf051829a1fb4570822d7844f700d8cf_SeqId_4_ - updating store file list. 
2024-11-21T00:21:03,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStoreFile(483): HFile Bloom filter type for bf051829a1fb4570822d7844f700d8cf_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:03,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/bf051829a1fb4570822d7844f700d8cf_SeqId_4_ into 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:03,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ into 26fa993800b450213215e7fb728f4f55/f (new location: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/bf051829a1fb4570822d7844f700d8cf_SeqId_4_) 2024-11-21T00:21:03,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/881165bacbc44ced9c7456c352c7520e_SeqId_4_ 2024-11-21T00:21:03,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ into 26fa993800b450213215e7fb728f4f55/f as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/262e98d9fd394e2aa812bcdd9f4b73da_SeqId_4_ - updating store file list. 
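[Editor's note] The entries immediately above and below show the two replicated HFiles being committed into region 26fa993800b450213215e7fb728f4f55 on the second cluster, after which rows written on the first cluster (for example "aaaa" from hfile_0) become readable there. The snippet below is an assumed verification along the lines of what the test's wait loop checks ("Waiting more time for bulkloaded data replication."); the class name and the peer-side configuration handling are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifyReplicatedRow {
  public static void main(String[] args) throws Exception {
    // Assumed to be configured for the peer cluster, i.e. the one hosting 26fa993800b450213215e7fb728f4f55.
    Configuration peerConf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(peerConf);
         Table table = conn.getTable(TableName.valueOf("test"))) {
      // Row "aaaa" and family "f" are taken from the HFile bounds reported in the log.
      Result r = table.get(new Get(Bytes.toBytes("aaaa")).addFamily(Bytes.toBytes("f")));
      System.out.println("row aaaa replicated: " + !r.isEmpty());
    }
  }
}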
2024-11-21T00:21:03,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 262e98d9fd394e2aa812bcdd9f4b73da_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:03,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/262e98d9fd394e2aa812bcdd9f4b73da_SeqId_4_ into 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:03,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ into 26fa993800b450213215e7fb728f4f55/f (new location: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/262e98d9fd394e2aa812bcdd9f4b73da_SeqId_4_) 2024-11-21T00:21:03,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins.hfs.11__test__2rsvj009nkk4ekutll1btq1f15kpohf823al4pf2ng4d2tijh2crvbn1i2oucnke/f/738d4c1ab6b04e659280722f69f567c9_SeqId_4_ 2024-11-21T00:21:03,105 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.ReplicationSink(324): Finished replicating [00ce61ad-d457-45d3-9d65-31dda6579f89] bulk loaded data 2024-11-21T00:21:03,178 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 379, reset compression=false 2024-11-21T00:21:03,212 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 687, reset compression=false 2024-11-21T00:21:03,215 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:21:03,217 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.shipper5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,34237,1732148450960 got entry batch from reader: WALEntryBatch [walEntries=[{test/26fa993800b450213215e7fb728f4f55/5=[#edits: 1 = 
<\x00/METAFAMILY:HBASE::BULK_LOAD/1732148463081/Put/vlen=228/seqid=0; >],8341}], lastWalPath=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873, lastWalPosition=725, nbRowKeys=1, nbHFiles=2, heapSize=8341, lastSeqIds={}, endOfFile=false,usedBufferSize=447] 2024-11-21T00:21:03,222 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.shipper5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:21:03,224 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36264, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.11 (auth:SIMPLE), service=AdminService 2024-11-21T00:21:03,225 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.ReplicationSink(318): Replicating [00ce61ad-d457-45d3-9d65-31dda6579f89, 45829647-44d5-4cc7-bba1-1ee88815a87f] bulk loaded data 2024-11-21T00:21:03,232 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@5382e81c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:03,232 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35861,-1 for getting cluster id 2024-11-21T00:21:03,233 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:21:03,234 DEBUG [HMaster-EventLoopGroup-23-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '00ce61ad-d457-45d3-9d65-31dda6579f89' 2024-11-21T00:21:03,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:21:03,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "00ce61ad-d457-45d3-9d65-31dda6579f89" 2024-11-21T00:21:03,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@51bb9fbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:03,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35861,-1] 2024-11-21T00:21:03,235 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:21:03,236 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:03,238 INFO [HMaster-EventLoopGroup-23-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38254, version=3.0.0-beta-2-SNAPSHOT, sasl=false, 
ugi=jenkins.hfs.10 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:21:03,240 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@77abc3b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:03,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741842_1018 (size=7691) 2024-11-21T00:21:03,416 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 725, reset compression=false 2024-11-21T00:21:03,620 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:21:03,624 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_0 is 36, key is gggg/f:row/1732148463624/Put/seqid=0 2024-11-21T00:21:03,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741842_1018 (size=11194) 2024-11-21T00:21:03,641 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_1 is 34, key is jjj/f:row/1732148463640/Put/seqid=0 2024-11-21T00:21:03,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741843_1019 (size=10791) 2024-11-21T00:21:03,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741843_1019 (size=7894) 2024-11-21T00:21:03,736 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 725, reset compression=false 2024-11-21T00:21:03,737 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 687, reset compression=false 2024-11-21T00:21:04,055 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d002f78, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:04,055 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,34653,-1 for getting cluster id 2024-11-21T00:21:04,056 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:21:04,056 DEBUG [HMaster-EventLoopGroup-25-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '45829647-44d5-4cc7-bba1-1ee88815a87f' 2024-11-21T00:21:04,056 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:21:04,057 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "45829647-44d5-4cc7-bba1-1ee88815a87f" 2024-11-21T00:21:04,057 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e169f5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:04,057 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,34653,-1] 2024-11-21T00:21:04,057 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:21:04,057 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:04,058 INFO [HMaster-EventLoopGroup-25-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38926, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:21:04,059 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@149fe2ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:21:04,059 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:21:04,060 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,34237,1732148450960, seqNum=-1] 2024-11-21T00:21:04,060 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:21:04,061 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-26-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60806, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:21:04,068 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:21:04,069 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34653,1732148450571 2024-11-21T00:21:04,069 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@34c79f7a 2024-11-21T00:21:04,069 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:21:04,070 INFO [HMaster-EventLoopGroup-25-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38930, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:21:04,074 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148454830.26fa993800b450213215e7fb728f4f55., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2] 2024-11-21T00:21:04,077 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:21:04,083 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_0 first=Optional[gggg] last=Optional[iiii] 2024-11-21T00:21:04,087 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_1 first=Optional[jjj] last=Optional[lll] 2024-11-21T00:21:04,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_1 for inclusion in 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:04,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(626): HFile bounds: first=jjj last=lll 2024-11-21T00:21:04,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:04,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_0 for inclusion in 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:04,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(626): HFile bounds: first=gggg last=iiii 2024-11-21T00:21:04,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:04,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HRegion(2603): Flush status journal for 26fa993800b450213215e7fb728f4f55: 2024-11-21T00:21:04,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_1 to hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_1 2024-11-21T00:21:04,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_1 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ 2024-11-21T00:21:04,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/testHFileCyclicReplication_10/f/hfile_0 to hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_0 2024-11-21T00:21:04,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_0 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ 2024-11-21T00:21:04,141 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:21:04,142 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 725, reset compression=false 2024-11-21T00:21:04,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35861,1732148444638 2024-11-21T00:21:04,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@60204305 2024-11-21T00:21:04,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:21:04,146 INFO [HMaster-EventLoopGroup-23-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38264, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.10 (auth:SIMPLE), service=MasterService 2024-11-21T00:21:04,149 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:21:04,149 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:21:04,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33745,1732148444978, seqNum=-1] 2024-11-21T00:21:04,151 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.RpcConnection(159): Using 
SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:21:04,152 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36274, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.10 (auth:SIMPLE), service=ClientService 2024-11-21T00:21:04,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_1 into 26fa993800b450213215e7fb728f4f55/f as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ - updating store file list. 2024-11-21T00:21:04,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStoreFile(483): HFile Bloom filter type for afa56584fef849cd89d09d7e79665a69_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:04,162 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ into 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:04,162 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_1 into 26fa993800b450213215e7fb728f4f55/f (new location: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_) 2024-11-21T00:21:04,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_1 2024-11-21T00:21:04,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_0 into 26fa993800b450213215e7fb728f4f55/f as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ - updating store file list. 
2024-11-21T00:21:04,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:04,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ into 26fa993800b450213215e7fb728f4f55/f 2024-11-21T00:21:04,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_0 into 26fa993800b450213215e7fb728f4f55/f (new location: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_) 2024-11-21T00:21:04,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/staging/jenkins__test__qb1vfr44q8q94ork0nh0s42tjdtbsrluhhn5tklrvu2ogd26berjecnvlslc2ke9/f/hfile_0 2024-11-21T00:21:04,170 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:21:04,170 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.replication.TestMasterReplication.loadAndValidateHFileReplication(TestMasterReplication.java:720) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:216) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:04,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:04,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:04,170 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:21:04,171 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:21:04,172 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__18rh2l5iisbuqbi7najkqgn7liordk7ectb47o5ishdjhjmsqs8k6e6q7594odbc/f/262e98d9fd394e2aa812bcdd9f4b73da_SeqId_4_ first=Optional[aaaa] last=Optional[cccc] 2024-11-21T00:21:04,172 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.AsyncRegionLocatorHelper(64): Try updating region=test,,1732148454830.26fa993800b450213215e7fb728f4f55., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2 , the old value is region=test,,1732148454830.26fa993800b450213215e7fb728f4f55., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=5ed4808ef0e6:34237 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:21:04,172 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=test,,1732148454830.26fa993800b450213215e7fb728f4f55., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:21:04,172 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.AsyncRegionLocatorHelper(88): Try removing region=test,,1732148454830.26fa993800b450213215e7fb728f4f55., hostname=5ed4808ef0e6,34237,1732148450960, seqNum=2 from cache 2024-11-21T00:21:04,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34237: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 264 connection: 172.17.0.2:60806 deadline: 1732148524172 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:60806 2024-11-21T00:21:04,173 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33745,1732148444978, seqNum=-1] 2024-11-21T00:21:04,174 DEBUG 
[RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:21:04,175 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36290, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:21:04,177 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__18rh2l5iisbuqbi7najkqgn7liordk7ectb47o5ishdjhjmsqs8k6e6q7594odbc/f/bf051829a1fb4570822d7844f700d8cf_SeqId_4_ first=Optional[ddd] last=Optional[fff] 2024-11-21T00:21:04,177 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148453683.04d0dd054c4f9e0316fac51b61606b4b., hostname=5ed4808ef0e6,33745,1732148444978, seqNum=2] 2024-11-21T00:21:04,179 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.ReplicationSink(324): Finished replicating [00ce61ad-d457-45d3-9d65-31dda6579f89, 45829647-44d5-4cc7-bba1-1ee88815a87f] bulk loaded data 2024-11-21T00:21:04,187 INFO [Time-limited test {}] replication.TestMasterReplication(739): Waiting more time for bulkloaded data replication. 2024-11-21T00:21:04,194 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:21:04,195 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.shipper5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,34237,1732148450960 got entry batch from reader: WALEntryBatch [walEntries=[{test/26fa993800b450213215e7fb728f4f55/7=[#edits: 1 = <\x00/METAFAMILY:HBASE::BULK_LOAD/1732148464168/Put/vlen=190/seqid=0; >],11601}], lastWalPath=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873, lastWalPosition=1033, nbRowKeys=1, nbHFiles=2, heapSize=11601, lastSeqIds={}, endOfFile=false,usedBufferSize=407] 2024-11-21T00:21:04,196 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.ReplicationSink(318): Replicating [45829647-44d5-4cc7-bba1-1ee88815a87f] bulk loaded data 2024-11-21T00:21:04,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741844_1020 (size=10791) 2024-11-21T00:21:04,279 WARN [Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'test', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:34237 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-11-21T00:21:04,342 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 687, reset compression=false 2024-11-21T00:21:04,395 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 1033, reset compression=false 2024-11-21T00:21:04,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45219 is added to blk_1073741845_1021 (size=11194) 2024-11-21T00:21:04,701 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 1033, reset compression=false 2024-11-21T00:21:05,029 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:21:05,035 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ first=Optional[gggg] last=Optional[iiii] 2024-11-21T00:21:05,038 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ first=Optional[jjj] last=Optional[lll] 2024-11-21T00:21:05,041 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ for inclusion in 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(626): HFile bounds: first=gggg last=iiii 2024-11-21T00:21:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:05,044 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ for inclusion in 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:05,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(626): HFile bounds: first=jjj last=lll 2024-11-21T00:21:05,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:21:05,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HRegion(2603): Flush status journal for 04d0dd054c4f9e0316fac51b61606b4b: 2024-11-21T00:21:05,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:21:05,049 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 687, reset compression=false 2024-11-21T00:21:05,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/efa99adc28464dd5a3cfda1273ee5b51_SeqId_6_ 2024-11-21T00:21:05,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): afa56584fef849cd89d09d7e79665a69_SeqId_6_ is already available in staging directory. Skipping copy or rename. 
2024-11-21T00:21:05,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/982bbe134a9744568af0575239a28900_SeqId_6_ 2024-11-21T00:21:05,059 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ into 04d0dd054c4f9e0316fac51b61606b4b/f as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/efa99adc28464dd5a3cfda1273ee5b51_SeqId_6_ - updating store file list. 2024-11-21T00:21:05,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStoreFile(483): HFile Bloom filter type for efa99adc28464dd5a3cfda1273ee5b51_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:05,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/efa99adc28464dd5a3cfda1273ee5b51_SeqId_6_ into 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:05,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ into 04d0dd054c4f9e0316fac51b61606b4b/f (new location: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/efa99adc28464dd5a3cfda1273ee5b51_SeqId_6_) 2024-11-21T00:21:05,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/5f9ea45798cf4bc6a8882841375bcac9_SeqId_6_ 2024-11-21T00:21:05,065 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ into 04d0dd054c4f9e0316fac51b61606b4b/f as hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/982bbe134a9744568af0575239a28900_SeqId_6_ - updating store file list. 
2024-11-21T00:21:05,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 982bbe134a9744568af0575239a28900_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:21:05,070 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/982bbe134a9744568af0575239a28900_SeqId_6_ into 04d0dd054c4f9e0316fac51b61606b4b/f 2024-11-21T00:21:05,070 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ into 04d0dd054c4f9e0316fac51b61606b4b/f (new location: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/data/default/test/04d0dd054c4f9e0316fac51b61606b4b/f/982bbe134a9744568af0575239a28900_SeqId_6_) 2024-11-21T00:21:05,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/staging/jenkins.hfs.10__test__l631sek2n1add417pmmlls4e1geh4v9u1g2e2qep94qh1mm438opht40jqjcg9be/f/afa56584fef849cd89d09d7e79665a69_SeqId_6_ 2024-11-21T00:21:05,072 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 {}] regionserver.ReplicationSink(324): Finished replicating [45829647-44d5-4cc7-bba1-1ee88815a87f] bulk loaded data 2024-11-21T00:21:05,109 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 1033, reset compression=false 2024-11-21T00:21:05,199 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:21:05,199 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:21:05,199 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:221) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:05,199 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:05,199 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:05,199 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:21:05,200 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:21:05,200 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1529400062, stopped=false 2024-11-21T00:21:05,200 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,34653,1732148450571 2024-11-21T00:21:05,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1254608113/running 2024-11-21T00:21:05,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1254608113/running 2024-11-21T00:21:05,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:21:05,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:21:05,210 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:21:05,210 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:21:05,211 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:221) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:05,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:05,211 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on znode that does not yet exist, /1-1254608113/running 2024-11-21T00:21:05,211 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,34237,1732148450960' ***** 2024-11-21T00:21:05,211 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:21:05,211 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:21:05,211 INFO [RS:0;5ed4808ef0e6:34237 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:21:05,211 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:21:05,211 INFO [RS:0;5ed4808ef0e6:34237 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-21T00:21:05,211 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Set watcher on znode that does not yet exist, /1-1254608113/running 2024-11-21T00:21:05,211 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(3091): Received CLOSE for 1aca148ecd754b87a674ca563b713328 2024-11-21T00:21:05,220 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(3091): Received CLOSE for 26fa993800b450213215e7fb728f4f55 2024-11-21T00:21:05,220 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,34237,1732148450960 2024-11-21T00:21:05,220 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:21:05,220 INFO [RS:0;5ed4808ef0e6:34237 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:34237. 2024-11-21T00:21:05,220 DEBUG [RS:0;5ed4808ef0e6:34237 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:05,220 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:05,221 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:21:05,221 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:21:05,221 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:21:05,221 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:21:05,221 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:21:05,221 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 1aca148ecd754b87a674ca563b713328=hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328., 26fa993800b450213215e7fb728f4f55=test,,1732148454830.26fa993800b450213215e7fb728f4f55.} 2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1aca148ecd754b87a674ca563b713328, disabling compactions & flushes 2024-11-21T00:21:05,221 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1aca148ecd754b87a674ca563b713328, 26fa993800b450213215e7fb728f4f55 2024-11-21T00:21:05,221 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. after waiting 0 ms 2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 
2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:21:05,221 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:21:05,221 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1aca148ecd754b87a674ca563b713328 3/3 column families, dataSize=1.13 KB heapSize=2.75 KB 2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:21:05,221 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:21:05,221 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:21:05,228 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:21:05,262 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/info/0560c6e868f24d79ba325b8a6eacc27b is 147, key is hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328./info:regioninfo/1732148460523/Put/seqid=0 2024-11-21T00:21:05,265 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/.tmp/hfileref/4065bbed30dc4de592f4078c1182c411 is 74, key is 1/hfileref:262e98d9fd394e2aa812bcdd9f4b73da_SeqId_4_/1732148464181/DeleteColumn/seqid=0 2024-11-21T00:21:05,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741844_1020 (size=7686) 2024-11-21T00:21:05,286 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/info/0560c6e868f24d79ba325b8a6eacc27b 2024-11-21T00:21:05,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741845_1021 (size=5345) 2024-11-21T00:21:05,317 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/ns/f2ecbee62307496ca2e83495216adca7 is 43, key is default/ns:d/1732148453472/Put/seqid=0 2024-11-21T00:21:05,322 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741846_1022 (size=5153) 2024-11-21T00:21:05,421 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1aca148ecd754b87a674ca563b713328, 26fa993800b450213215e7fb728f4f55 2024-11-21T00:21:05,617 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 1033, reset compression=false 2024-11-21T00:21:05,621 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1aca148ecd754b87a674ca563b713328, 26fa993800b450213215e7fb728f4f55 2024-11-21T00:21:05,702 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=560 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/.tmp/hfileref/4065bbed30dc4de592f4078c1182c411 2024-11-21T00:21:05,723 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/ns/f2ecbee62307496ca2e83495216adca7 2024-11-21T00:21:05,724 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/.tmp/queue/1cd4c30b2eaf4ae8b92c95de2b9248e7 is 154, key is 1-5ed4808ef0e6,34237,1732148450960/queue:5ed4808ef0e6%2C34237%2C1732148450960/1732148465076/Put/seqid=0 2024-11-21T00:21:05,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741847_1023 (size=5353) 2024-11-21T00:21:05,746 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/rep_barrier/bbfb24f2c5b64cb281e36cc00c2d836f is 112, key is test,,1732148454830.26fa993800b450213215e7fb728f4f55./rep_barrier:seqnumDuringOpen/1732148456022/Put/seqid=0 2024-11-21T00:21:05,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741848_1024 (size=5518) 2024-11-21T00:21:05,767 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:21:05,767 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:21:05,821 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1aca148ecd754b87a674ca563b713328, 26fa993800b450213215e7fb728f4f55 2024-11-21T00:21:05,853 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 687, reset compression=false 2024-11-21T00:21:05,999 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:21:06,022 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1aca148ecd754b87a674ca563b713328, 26fa993800b450213215e7fb728f4f55 2024-11-21T00:21:06,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:06,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:06,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:06,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:06,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:06,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:06,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:06,221 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.wal-reader.5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 to pos 1033, reset compression=false 2024-11-21T00:21:06,222 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1aca148ecd754b87a674ca563b713328, 26fa993800b450213215e7fb728f4f55 2024-11-21T00:21:06,308 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_replication 2024-11-21T00:21:06,308 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_replication Metrics about Tables on a single HBase RegionServer 2024-11-21T00:21:06,344 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/rep_barrier/bbfb24f2c5b64cb281e36cc00c2d836f 2024-11-21T00:21:06,344 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=595 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/.tmp/queue/1cd4c30b2eaf4ae8b92c95de2b9248e7 2024-11-21T00:21:06,344 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:21:06,344 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33745,1732148444978 got entry batch from reader: WALEntryBatch [walEntries=[{test/04d0dd054c4f9e0316fac51b61606b4b/7=[#edits: 1 = <\x00/METAFAMILY:HBASE::BULK_LOAD/1732148465071/Put/vlen=228/seqid=0; >],11238}], lastWalPath=hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287, lastWalPosition=1033, nbRowKeys=1, nbHFiles=2, heapSize=11238, lastSeqIds={}, endOfFile=false,usedBufferSize=447] 2024-11-21T00:21:06,346 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping at org.apache.hadoop.hbase.regionserver.RSRpcServices.checkOpen(RSRpcServices.java:1466) ~[classes/:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.replicateWALEntry(RSRpcServices.java:2233) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$2.callBlockingMethod(AdminProtos.java:34073) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:21:06,346 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.CallRunner(138): callId: 4 service: AdminService methodName: ReplicateWALEntry size: 603 connection: 172.17.0.2:60790 deadline: 1732148526345, exception=org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping 2024-11-21T00:21:06,348 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.HBaseInterClusterReplicationEndpoint(480): [Source for peer 1]: Peer encountered RemoteException, rechecking all sinks: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping at org.apache.hadoop.hbase.regionserver.RSRpcServices.checkOpen(RSRpcServices.java:1466) at org.apache.hadoop.hbase.regionserver.RSRpcServices.replicateWALEntry(RSRpcServices.java:2233) at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$2.callBlockingMethod(AdminProtos.java:34073) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:21:06,350 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/.tmp/hfileref/4065bbed30dc4de592f4078c1182c411 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/hfileref/4065bbed30dc4de592f4078c1182c411 2024-11-21T00:21:06,356 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/hfileref/4065bbed30dc4de592f4078c1182c411, entries=4, sequenceid=12, filesize=5.2 K 2024-11-21T00:21:06,364 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/.tmp/queue/1cd4c30b2eaf4ae8b92c95de2b9248e7 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/queue/1cd4c30b2eaf4ae8b92c95de2b9248e7 2024-11-21T00:21:06,370 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/queue/1cd4c30b2eaf4ae8b92c95de2b9248e7, entries=1, sequenceid=12, filesize=5.2 K 2024-11-21T00:21:06,371 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/table/fc90343672134f13807b2018638dbe63 is 53, key is hbase:replication/table:state/1732148460539/Put/seqid=0 2024-11-21T00:21:06,371 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.13 KB/1155, heapSize ~2.47 KB/2528, currentSize=0 B/0 for 1aca148ecd754b87a674ca563b713328 in 1150ms, sequenceid=12, compaction requested=false 2024-11-21T00:21:06,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741849_1025 (size=5308) 2024-11-21T00:21:06,398 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/table/fc90343672134f13807b2018638dbe63 2024-11-21T00:21:06,403 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/info/0560c6e868f24d79ba325b8a6eacc27b as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/info/0560c6e868f24d79ba325b8a6eacc27b 2024-11-21T00:21:06,408 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/info/0560c6e868f24d79ba325b8a6eacc27b, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:21:06,408 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/replication/1aca148ecd754b87a674ca563b713328/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-11-21T00:21:06,409 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/ns/f2ecbee62307496ca2e83495216adca7 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/ns/f2ecbee62307496ca2e83495216adca7 2024-11-21T00:21:06,409 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:21:06,409 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:21:06,409 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 
2024-11-21T00:21:06,409 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1aca148ecd754b87a674ca563b713328: Waiting for close lock at 1732148465221Running coprocessor pre-close hooks at 1732148465221Disabling compacts and flushes for region at 1732148465221Disabling writes for close at 1732148465221Obtaining lock to block concurrent updates at 1732148465221Preparing flush snapshotting stores in 1aca148ecd754b87a674ca563b713328 at 1732148465221Finished memstore snapshotting hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328., syncing WAL and waiting on mvcc, flushsize=dataSize=1155, getHeapSize=2768, getOffHeapSize=0, getCellsCount=12 at 1732148465222 (+1 ms)Flushing stores of hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. at 1732148465222Flushing 1aca148ecd754b87a674ca563b713328/hfileref: creating writer at 1732148465222Flushing 1aca148ecd754b87a674ca563b713328/hfileref: appending metadata at 1732148465264 (+42 ms)Flushing 1aca148ecd754b87a674ca563b713328/hfileref: closing flushed file at 1732148465264Flushing 1aca148ecd754b87a674ca563b713328/queue: creating writer at 1732148465707 (+443 ms)Flushing 1aca148ecd754b87a674ca563b713328/queue: appending metadata at 1732148465724 (+17 ms)Flushing 1aca148ecd754b87a674ca563b713328/queue: closing flushed file at 1732148465724Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@448f6a16: reopening flushed file at 1732148466349 (+625 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bf3ff36: reopening flushed file at 1732148466356 (+7 ms)Finished flush of dataSize ~1.13 KB/1155, heapSize ~2.47 KB/2528, currentSize=0 B/0 for 1aca148ecd754b87a674ca563b713328 in 1150ms, sequenceid=12, compaction requested=false at 1732148466371 (+15 ms)Writing region close event to WAL at 1732148466384 (+13 ms)Running coprocessor post-close hooks at 1732148466409 (+25 ms)Closed at 1732148466409 2024-11-21T00:21:06,409 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148459217.1aca148ecd754b87a674ca563b713328. 2024-11-21T00:21:06,410 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 26fa993800b450213215e7fb728f4f55, disabling compactions & flushes 2024-11-21T00:21:06,410 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:21:06,410 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:21:06,410 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148454830.26fa993800b450213215e7fb728f4f55. after waiting 0 ms 2024-11-21T00:21:06,410 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148454830.26fa993800b450213215e7fb728f4f55. 
2024-11-21T00:21:06,414 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/default/test/26fa993800b450213215e7fb728f4f55/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T00:21:06,414 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/ns/f2ecbee62307496ca2e83495216adca7, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:21:06,414 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:21:06,414 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:21:06,415 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148454830.26fa993800b450213215e7fb728f4f55. 2024-11-21T00:21:06,415 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 26fa993800b450213215e7fb728f4f55: Waiting for close lock at 1732148466410Running coprocessor pre-close hooks at 1732148466410Disabling compacts and flushes for region at 1732148466410Disabling writes for close at 1732148466410Writing region close event to WAL at 1732148466410Running coprocessor post-close hooks at 1732148466414 (+4 ms)Closed at 1732148466415 (+1 ms) 2024-11-21T00:21:06,415 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148454830.26fa993800b450213215e7fb728f4f55. 
2024-11-21T00:21:06,415 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/rep_barrier/bbfb24f2c5b64cb281e36cc00c2d836f as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/rep_barrier/bbfb24f2c5b64cb281e36cc00c2d836f 2024-11-21T00:21:06,421 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/rep_barrier/bbfb24f2c5b64cb281e36cc00c2d836f, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:21:06,422 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/.tmp/table/fc90343672134f13807b2018638dbe63 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/table/fc90343672134f13807b2018638dbe63 2024-11-21T00:21:06,422 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:21:06,422 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:21:06,422 DEBUG [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:21:06,428 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/table/fc90343672134f13807b2018638dbe63, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:21:06,429 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1208ms, sequenceid=16, compaction requested=false 2024-11-21T00:21:06,434 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:21:06,435 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:21:06,435 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:21:06,435 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:21:06,435 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148465221Running coprocessor pre-close hooks at 1732148465221Disabling compacts and flushes for region at 1732148465221Disabling writes for close at 
1732148465221Obtaining lock to block concurrent updates at 1732148465221Preparing flush snapshotting stores in 1588230740 at 1732148465221Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148465222 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148465222Flushing 1588230740/info: creating writer at 1732148465222Flushing 1588230740/info: appending metadata at 1732148465258 (+36 ms)Flushing 1588230740/info: closing flushed file at 1732148465259 (+1 ms)Flushing 1588230740/ns: creating writer at 1732148465291 (+32 ms)Flushing 1588230740/ns: appending metadata at 1732148465317 (+26 ms)Flushing 1588230740/ns: closing flushed file at 1732148465317Flushing 1588230740/rep_barrier: creating writer at 1732148465728 (+411 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148465745 (+17 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148465745Flushing 1588230740/table: creating writer at 1732148466350 (+605 ms)Flushing 1588230740/table: appending metadata at 1732148466370 (+20 ms)Flushing 1588230740/table: closing flushed file at 1732148466370Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@21b0faf6: reopening flushed file at 1732148466402 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73ef998: reopening flushed file at 1732148466408 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56a68f73: reopening flushed file at 1732148466414 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22f7e13e: reopening flushed file at 1732148466421 (+7 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1208ms, sequenceid=16, compaction requested=false at 1732148466429 (+8 ms)Writing region close event to WAL at 1732148466431 (+2 ms)Running coprocessor post-close hooks at 1732148466435 (+4 ms)Closed at 1732148466435 2024-11-21T00:21:06,435 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:21:06,452 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping at org.apache.hadoop.hbase.regionserver.RSRpcServices.checkOpen(RSRpcServices.java:1466) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.RSRpcServices.replicateWALEntry(RSRpcServices.java:2233) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$2.callBlockingMethod(AdminProtos.java:34073) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:21:06,452 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.CallRunner(138): callId: 7 service: AdminService methodName: ReplicateWALEntry size: 603 connection: 172.17.0.2:60790 deadline: 
1732148526451, exception=org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping 2024-11-21T00:21:06,452 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.HBaseInterClusterReplicationEndpoint(480): [Source for peer 1]: Peer encountered RemoteException, rechecking all sinks: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping at org.apache.hadoop.hbase.regionserver.RSRpcServices.checkOpen(RSRpcServices.java:1466) at org.apache.hadoop.hbase.regionserver.RSRpcServices.replicateWALEntry(RSRpcServices.java:2233) at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$2.callBlockingMethod(AdminProtos.java:34073) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:21:06,545 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:06,622 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,34237,1732148450960; all regions closed. 
2024-11-21T00:21:06,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:21:06,627 DEBUG [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/oldWALs 2024-11-21T00:21:06,627 INFO [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C34237%2C1732148450960.meta:.meta(num 1732148453398) 2024-11-21T00:21:06,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741839_1015 (size=3127) 2024-11-21T00:21:06,629 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.rep.1732148460462 not finished, retry = 0 2024-11-21T00:21:06,656 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping at org.apache.hadoop.hbase.regionserver.RSRpcServices.checkOpen(RSRpcServices.java:1466) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.RSRpcServices.replicateWALEntry(RSRpcServices.java:2233) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$2.callBlockingMethod(AdminProtos.java:34073) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:21:06,656 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34237 {}] ipc.CallRunner(138): callId: 10 service: AdminService methodName: ReplicateWALEntry size: 603 connection: 172.17.0.2:60790 deadline: 1732148526656, exception=org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping 2024-11-21T00:21:06,657 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.HBaseInterClusterReplicationEndpoint(480): [Source for peer 1]: Peer encountered RemoteException, rechecking all sinks: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.regionserver.RegionServerStoppedException: Server 5ed4808ef0e6,34237,1732148450960 stopping at org.apache.hadoop.hbase.regionserver.RSRpcServices.checkOpen(RSRpcServices.java:1466) at org.apache.hadoop.hbase.regionserver.RSRpcServices.replicateWALEntry(RSRpcServices.java:2233) at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$2.callBlockingMethod(AdminProtos.java:34073) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-21T00:21:06,731 DEBUG [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/oldWALs 2024-11-21T00:21:06,731 INFO [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C34237%2C1732148450960.rep:(num 1732148460462) 2024-11-21T00:21:06,733 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/WALs/5ed4808ef0e6,34237,1732148450960/5ed4808ef0e6%2C34237%2C1732148450960.1732148452873 not finished, retry = 0 2024-11-21T00:21:06,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741833_1009 (size=1336) 2024-11-21T00:21:06,835 DEBUG [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/oldWALs 2024-11-21T00:21:06,835 INFO [RS:0;5ed4808ef0e6:34237 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C34237%2C1732148450960:(num 1732148452873) 2024-11-21T00:21:06,835 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:06,835 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:21:06,836 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:21:06,836 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:21:06,836 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:21:06,836 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:21:06,836 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,34237,1732148450960 because: Region server is closing 2024-11-21T00:21:06,836 INFO [RS:0;5ed4808ef0e6:34237 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:34237. 
2024-11-21T00:21:06,837 DEBUG [RS:0;5ed4808ef0e6:34237 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:06,837 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:06,837 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:06,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
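The DEBUG entry above records the full call stack at the moment the async connection is closed, which makes it easy to see which shutdown path triggered the close. Below is a minimal, generic sketch of capturing the current stack with plain JDK calls; it is not the TraceUtil/AsyncConnectionImpl code, just the same idea.

```java
import java.util.Arrays;
import java.util.stream.Collectors;

/** Illustrative only: log the current call stack, similar in spirit to the
 *  "Call stack:" DEBUG entry above. Uses only java.lang.Thread. */
public class CloseStackLogger {
    public static String currentStack() {
        return Arrays.stream(Thread.currentThread().getStackTrace())
                .skip(2)                                   // drop getStackTrace() and this frame
                .map(frame -> "  at " + frame)
                .collect(Collectors.joining(System.lineSeparator()));
    }

    public static void main(String[] args) {
        System.out.println("Call stack:" + System.lineSeparator() + currentStack());
    }
}
```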
2024-11-21T00:21:06,849 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:06,937 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.shipper5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 2024-11-21T00:21:06,937 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,34237,1732148450960.replicationSource.shipper5ed4808ef0e6%2C34237%2C1732148450960,1-5ed4808ef0e6,34237,1732148450960 terminated 2024-11-21T00:21:06,938 INFO [RS:0;5ed4808ef0e6:34237 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:34237. 
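The WARN above is the shipper worker being interrupted while blocked waiting for its next replication batch, after which it terminates. A minimal sketch of that poll-with-timeout, exit-on-interrupt loop using java.util.concurrent follows; the queue contents and timings are placeholders.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

/** Illustrative only: a worker that polls a queue for the next batch and
 *  exits cleanly when interrupted during shutdown, like the shipper above. */
public class ShipperLoopSketch {
    private final LinkedBlockingQueue<String> batches = new LinkedBlockingQueue<>();
    private volatile boolean running = true;

    public void run() {
        while (running) {
            try {
                String batch = batches.poll(1, TimeUnit.SECONDS);   // wait for the next batch
                if (batch != null) {
                    System.out.println("shipping " + batch);
                }
            } catch (InterruptedException e) {
                // Interrupted while waiting: restore the flag and terminate the worker.
                Thread.currentThread().interrupt();
                running = false;
            }
        }
        System.out.println("worker terminated");
    }

    public static void main(String[] args) throws Exception {
        ShipperLoopSketch sketch = new ShipperLoopSketch();
        Thread worker = new Thread(sketch::run, "shipper");
        worker.start();
        sketch.batches.put("batch-1");
        Thread.sleep(100);
        worker.interrupt();                                         // simulate shutdown
        worker.join();
    }
}
```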
2024-11-21T00:21:06,938 DEBUG [RS:0;5ed4808ef0e6:34237 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:21:06,938 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:06,938 DEBUG [RS:0;5ed4808ef0e6:34237 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:21:06,938 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:21:06,938 INFO [RS:0;5ed4808ef0e6:34237 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34237 2024-11-21T00:21:06,960 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 5ed4808ef0e6:34237 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 5ed4808ef0e6/172.17.0.2:34237 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:21:06,961 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.FailedServers(52): Added failed server with address 5ed4808ef0e6:34237 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 5ed4808ef0e6/172.17.0.2:34237 2024-11-21T00:21:06,961 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.HBaseInterClusterReplicationEndpoint(495): [Source for peer 1]: Peer is unavailable, rechecking all sinks: java.net.ConnectException: Call to address=5ed4808ef0e6:34237 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 5ed4808ef0e6/172.17.0.2:34237 at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:220) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.userEventTriggered(BufferCallBeforeInitHandler.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:398) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:376) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireUserEventTriggered(AbstractChannelHandlerContext.java:368) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.userEventTriggered(DefaultChannelPipeline.java:1375) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:396) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:376) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireUserEventTriggered(DefaultChannelPipeline.java:862) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hadoop.hbase.ipc.NettyRpcConnection.failInit(NettyRpcConnection.java:210) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcConnection$2.fail(NettyRpcConnection.java:414) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcConnection$2.operationComplete(NettyRpcConnection.java:421) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcConnection$2.operationComplete(NettyRpcConnection.java:389) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setFailure0(DefaultPromise.java:629) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.tryFailure(DefaultPromise.java:118) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:679) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:698) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 5ed4808ef0e6/172.17.0.2:34237 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] ... 
7 more 2024-11-21T00:21:07,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113/rs 2024-11-21T00:21:07,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1254608113/rs/5ed4808ef0e6,34237,1732148450960 2024-11-21T00:21:07,165 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:21:07,252 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:07,272 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,34237,1732148450960] 2024-11-21T00:21:07,364 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] ipc.AbstractRpcClient(357): Not trying to connect to 5ed4808ef0e6:34237 this server is in the failed servers list 2024-11-21T00:21:07,365 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.HBaseInterClusterReplicationEndpoint(501): [Source for peer 1]: Can't replicate because of a local or network error: org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=5ed4808ef0e6:34237 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 5ed4808ef0e6:34237 at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:270) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$Stub.replicateWALEntry(AdminProtos.java:34879) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.lambda$replicateWALEntry$9(AsyncRegionServerAdmin.java:166) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.call(AsyncRegionServerAdmin.java:103) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.replicateWALEntry(AsyncRegionServerAdmin.java:164) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil.replicateWALEntry(ReplicationProtobufUtil.java:62) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicateEntries(HBaseInterClusterReplicationEndpoint.java:537) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.asyncReplicate(HBaseInterClusterReplicationEndpoint.java:620) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:392) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 5ed4808ef0e6:34237 at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:361) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 12 more 2024-11-21T00:21:07,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:21:07,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34237-0x1015ac3b1060004, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:21:07,373 INFO [RS:0;5ed4808ef0e6:34237 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:21:07,373 INFO [RS:0;5ed4808ef0e6:34237 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,34237,1732148450960; zookeeper connection closed. 2024-11-21T00:21:07,373 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@493e6517 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@493e6517 2024-11-21T00:21:07,373 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:21:07,413 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-1254608113/draining/5ed4808ef0e6,34237,1732148450960 already deleted, retry=false 2024-11-21T00:21:07,413 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,34237,1732148450960 expired; onlineServers=0 2024-11-21T00:21:07,413 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,34653,1732148450571' ***** 2024-11-21T00:21:07,413 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:21:07,414 INFO [M:0;5ed4808ef0e6:34653 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:21:07,414 INFO [M:0;5ed4808ef0e6:34653 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:21:07,414 DEBUG [M:0;5ed4808ef0e6:34653 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:21:07,414 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
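The repeated "this server is in the failed servers list" messages above show the RPC client short-circuiting calls to an address it recently failed to reach instead of reconnecting on every attempt. Here is a minimal, generic sketch of such an expiring deny-list; it is not HBase's FailedServers class, and the retry window is an arbitrary choice.

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;

/** Illustrative only: remember addresses that recently failed and skip them
 *  for a short window, like the "failed servers list" messages above. */
public class FailedServerCacheSketch {
    private final ConcurrentHashMap<String, Long> failedUntil = new ConcurrentHashMap<>();
    private final long retryWindowNanos = TimeUnit.SECONDS.toNanos(2); // arbitrary window

    /** Record a connection failure for this address. */
    public void addFailure(String address) {
        failedUntil.put(address, System.nanoTime() + retryWindowNanos);
    }

    /** True if the address failed recently and should be skipped for now. */
    public boolean isFailed(String address) {
        Long deadline = failedUntil.get(address);
        if (deadline == null) {
            return false;
        }
        if (System.nanoTime() >= deadline) {
            failedUntil.remove(address, deadline);   // window elapsed: allow a new attempt
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        FailedServerCacheSketch cache = new FailedServerCacheSketch();
        cache.addFailure("5ed4808ef0e6:34237");
        System.out.println(cache.isFailed("5ed4808ef0e6:34237")); // true within the window
    }
}
```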
2024-11-21T00:21:07,414 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148452597 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148452597,5,FailOnTimeoutGroup] 2024-11-21T00:21:07,414 DEBUG [M:0;5ed4808ef0e6:34653 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:21:07,414 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148452601 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148452601,5,FailOnTimeoutGroup] 2024-11-21T00:21:07,414 INFO [M:0;5ed4808ef0e6:34653 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:21:07,414 INFO [M:0;5ed4808ef0e6:34653 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:21:07,415 DEBUG [M:0;5ed4808ef0e6:34653 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:21:07,415 INFO [M:0;5ed4808ef0e6:34653 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:21:07,415 INFO [M:0;5ed4808ef0e6:34653 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:21:07,415 INFO [M:0;5ed4808ef0e6:34653 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:21:07,415 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:21:07,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-1254608113/master 2024-11-21T00:21:07,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-1254608113 2024-11-21T00:21:07,633 DEBUG [M:0;5ed4808ef0e6:34653 {}] zookeeper.ZKUtil(347): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Unable to get data of znode /1-1254608113/master because node does not exist (not an error) 2024-11-21T00:21:07,633 WARN [M:0;5ed4808ef0e6:34653 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:21:07,634 INFO [M:0;5ed4808ef0e6:34653 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/.lastflushedseqids 2024-11-21T00:21:07,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741850_1026 (size=263) 2024-11-21T00:21:07,756 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:07,866 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] ipc.AbstractRpcClient(357): Not trying to connect to 5ed4808ef0e6:34237 this server is in the failed servers list 2024-11-21T00:21:07,867 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.HBaseInterClusterReplicationEndpoint(501): [Source for peer 1]: Can't replicate because of a local or network error: org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=5ed4808ef0e6:34237 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 5ed4808ef0e6:34237 at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:270) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$Stub.replicateWALEntry(AdminProtos.java:34879) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.lambda$replicateWALEntry$9(AsyncRegionServerAdmin.java:166) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.call(AsyncRegionServerAdmin.java:103) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.replicateWALEntry(AsyncRegionServerAdmin.java:164) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil.replicateWALEntry(ReplicationProtobufUtil.java:62) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicateEntries(HBaseInterClusterReplicationEndpoint.java:537) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.asyncReplicate(HBaseInterClusterReplicationEndpoint.java:620) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:392) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 5ed4808ef0e6:34237 at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:361) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
12 more 2024-11-21T00:21:08,040 INFO [M:0;5ed4808ef0e6:34653 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:21:08,040 INFO [M:0;5ed4808ef0e6:34653 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:21:08,040 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:21:08,040 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:21:08,040 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:21:08,040 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:21:08,040 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:21:08,041 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.51 KB heapSize=64.93 KB 2024-11-21T00:21:08,065 DEBUG [M:0;5ed4808ef0e6:34653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/815f0d455523421586cf4e1e33184424 is 82, key is hbase:meta,,1/info:regioninfo/1732148453434/Put/seqid=0 2024-11-21T00:21:08,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741851_1027 (size=5672) 2024-11-21T00:21:08,361 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:08,468 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] ipc.AbstractRpcClient(357): Not trying to connect to 5ed4808ef0e6:34237 this server is in the failed servers list 2024-11-21T00:21:08,468 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.HBaseInterClusterReplicationEndpoint(501): [Source for peer 1]: Can't replicate because of a local or network error: org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=5ed4808ef0e6:34237 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 5ed4808ef0e6:34237 at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:270) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos$AdminService$Stub.replicateWALEntry(AdminProtos.java:34879) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.lambda$replicateWALEntry$9(AsyncRegionServerAdmin.java:166) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.call(AsyncRegionServerAdmin.java:103) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionServerAdmin.replicateWALEntry(AsyncRegionServerAdmin.java:164) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.protobuf.ReplicationProtobufUtil.replicateWALEntry(ReplicationProtobufUtil.java:62) ~[classes/:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicateEntries(HBaseInterClusterReplicationEndpoint.java:537) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.asyncReplicate(HBaseInterClusterReplicationEndpoint.java:620) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:392) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 5ed4808ef0e6:34237 at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:361) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 12 more 2024-11-21T00:21:08,481 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/815f0d455523421586cf4e1e33184424 2024-11-21T00:21:08,501 DEBUG [M:0;5ed4808ef0e6:34653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a79a65dbea5f4f5cab066c495ca18764 is 1480, key is \x00\x00\x00\x00\x00\x00\x00\x08/proc:d/1732148460548/Put/seqid=0 2024-11-21T00:21:08,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741852_1028 (size=8517) 2024-11-21T00:21:08,906 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=54.96 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a79a65dbea5f4f5cab066c495ca18764 2024-11-21T00:21:08,930 DEBUG [M:0;5ed4808ef0e6:34653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3901df6859ce47f8826fca4fc847eaf2 is 69, key is 5ed4808ef0e6,34237,1732148450960/rs:state/1732148452653/Put/seqid=0 2024-11-21T00:21:08,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38465 is added to blk_1073741853_1029 (size=5156) 2024-11-21T00:21:09,064 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:09,170 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-26-2 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 5ed4808ef0e6:34237 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 5ed4808ef0e6/172.17.0.2:34237 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
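The recurring "Connection refused" warnings above simply mean nothing is listening on the stopped region server's port any more. A minimal sketch of probing an address for TCP reachability with the plain JDK socket API follows; the host, port, and timeout are placeholders, and this is not how the Netty-based RPC client does it.

```java
import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.Socket;

/** Illustrative only: probe whether an RPC endpoint accepts TCP connections,
 *  the condition the "Connection refused" warnings above are reporting. */
public class SinkProbeSketch {
    public static boolean isReachable(String host, int port, int timeoutMillis) {
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(host, port), timeoutMillis);
            return true;
        } catch (ConnectException e) {
            return false;                 // nothing listening: the sink is down
        } catch (IOException e) {
            return false;                 // other I/O problems also count as unreachable
        }
    }

    public static void main(String[] args) {
        System.out.println(isReachable("127.0.0.1", 34237, 500));
    }
}
```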
2024-11-21T00:21:09,347 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3901df6859ce47f8826fca4fc847eaf2 2024-11-21T00:21:09,352 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/815f0d455523421586cf4e1e33184424 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/815f0d455523421586cf4e1e33184424 2024-11-21T00:21:09,358 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/815f0d455523421586cf4e1e33184424, entries=8, sequenceid=97, filesize=5.5 K 2024-11-21T00:21:09,360 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a79a65dbea5f4f5cab066c495ca18764 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a79a65dbea5f4f5cab066c495ca18764 2024-11-21T00:21:09,365 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a79a65dbea5f4f5cab066c495ca18764, entries=11, sequenceid=97, filesize=8.3 K 2024-11-21T00:21:09,366 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/3901df6859ce47f8826fca4fc847eaf2 as hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3901df6859ce47f8826fca4fc847eaf2 2024-11-21T00:21:09,372 INFO [M:0;5ed4808ef0e6:34653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/3901df6859ce47f8826fca4fc847eaf2, entries=1, sequenceid=97, filesize=5.0 K 2024-11-21T00:21:09,869 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:10,773 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset 
reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:11,778 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:11,912 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:21:11,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:11,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:11,952 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:11,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:11,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:11,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:11,972 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:21:12,901 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:14,108 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:21:14,216 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
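The FsDatasetAsyncDiskServiceFixer DEBUG entries above report a NoSuchFieldException from a reflective lookup of a private field that newer Hadoop versions no longer expose, and treat it as a non-fatal version difference. A minimal, generic sketch of that defensive reflective access follows; the class and field names are placeholders.

```java
import java.lang.reflect.Field;

/** Illustrative only: look up a private field reflectively and treat its
 *  absence as a version difference rather than a failure, mirroring the
 *  NoSuchFieldException DEBUG entries above. All names here are placeholders. */
public class ReflectiveFieldSketch {
    /** Stand-in for a class whose internals differ between versions. */
    static class LegacyService {
        private final String threadGroupName = "disk-service";
    }

    static Object readFieldIfPresent(Object target, String fieldName) {
        try {
            Field field = target.getClass().getDeclaredField(fieldName);
            field.setAccessible(true);
            return field.get(target);
        } catch (NoSuchFieldException e) {
            // Field removed or renamed in this version: report it and carry on.
            System.out.println("NoSuchFieldException: " + fieldName
                    + "; it might be a different library version");
            return null;
        } catch (IllegalAccessException e) {
            throw new IllegalStateException(e);
        }
    }

    public static void main(String[] args) {
        LegacyService svc = new LegacyService();
        System.out.println(readFieldIfPresent(svc, "threadGroupName")); // prints disk-service
        System.out.println(readFieldIfPresent(svc, "threadGroup"));     // missing: prints null
    }
}
```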
2024-11-21T00:21:15,412 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:16,816 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:18,320 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:19,923 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:20,139 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:21:21,628 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:23,432 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:25,336 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:27,340 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:28,593 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-21T00:21:28,593 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-21T00:21:29,443 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:31,648 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:33,959 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:36,363 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:38,934 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:39,456 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570
2024-11-21T00:21:41,541 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:44,216 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:21:44,244 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:47,047 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:47,223 DEBUG [regionserver/5ed4808ef0e6:0.Chore.1 {}] throttle.PressureAwareCompactionThroughputController(103): CompactionPressure is 0.07692307692307693, tune throughput to 53.85 MB/second
2024-11-21T00:21:49,950 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:50,139 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:21:52,957 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:56,071 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:21:59,276 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:02,579 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:05,117 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:44978 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741839_1015] {}] datanode.DataXceiver(331): 127.0.0.1:45219:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44978 dst: /127.0.0.1:45219
java.net.SocketTimeoutException: 60000 millis timeout while waiting for channel to be ready for read. ch : java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45219 remote=/127.0.0.1:44978]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:163) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-21T00:22:05,984 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5ed4808ef0e6:34653
455 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 16 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@d1db0eb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 25 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a794780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 2601 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.CountDownLatch$Sync@52ee9161 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 938 Waited count: 1768 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) app//org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:221) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) Thread 32 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@53047f3c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 34 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1301779b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 44 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 137 (ForkJoinPool-2-worker-2): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.ForkJoinPool@629adae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 138 (ForkJoinPool-2-worker-3): State: WAITING Blocked count: 0 Waited count: 10 Waiting on java.util.concurrent.ForkJoinPool@629adae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 152 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@20ea4193 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HBase-Metrics2-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 702 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48c23f15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@3bb7d7bf): State: TIMED_WAITING Blocked count: 0 Waited count: 84 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 4 Waited count: 1 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418d319b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 24771 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 285 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f55408b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 301 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12b3f654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 303 (RPCClient-NioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 304 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 305 (RPCClient-NioEventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 242 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 24591 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 306 (RPCClient-NioEventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 307 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 417 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 308 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ec9cda9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 309 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56603a02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 318 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 419 (HMaster-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (MiniHBaseClusterRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (MiniHBaseClusterRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 496 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53b4795d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (HMaster-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 519 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e383429 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 535 (MiniHBaseClusterRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e2c1ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-4-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (HMaster-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-4-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-4-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 422 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 549 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39cb992f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 550 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49c561da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-4-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (RPCClient-NioEventLoopGroup-4-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (RPCClient-NioEventLoopGroup-4-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (RPCClient-NioEventLoopGroup-4-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (RPCClient-NioEventLoopGroup-4-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (RPCClient-NioEventLoopGroup-4-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (RPCClient-NioEventLoopGroup-4-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 678 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 695 (MiniHBaseClusterRegionServer-EventLoopGroup-8-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 724 (MiniHBaseClusterRegionServer-EventLoopGroup-8-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 744 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56c39db7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 754 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 763 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 769 (MiniHBaseClusterRegionServer-EventLoopGroup-8-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 825 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 826 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 828 (RPCClient-NioEventLoopGroup-4-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 829 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 830 (RPCClient-NioEventLoopGroup-4-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 831 (RPCClient-NioEventLoopGroup-4-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 967 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1070 (HMaster-EventLoopGroup-9-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1102 (MiniHBaseClusterRegionServer-EventLoopGroup-10-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (MiniHBaseClusterRegionServer-EventLoopGroup-10-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1161 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d70bb7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1175 (HMaster-EventLoopGroup-9-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f59c6ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1200 (MiniHBaseClusterRegionServer-EventLoopGroup-10-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@153dd6c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (HMaster-EventLoopGroup-9-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (Time-limited test-SendThread(127.0.0.1:63439)): 
State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 1211 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d5159c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1212 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fc6e35a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1222 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1324 (HMaster-EventLoopGroup-11-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1341 (MiniHBaseClusterRegionServer-EventLoopGroup-12-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1383 (MiniHBaseClusterRegionServer-EventLoopGroup-12-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1400 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ebb900e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1416 (HMaster-EventLoopGroup-11-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1427 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@254bded4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1439 (MiniHBaseClusterRegionServer-EventLoopGroup-12-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1446 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a6d5614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1448 (HMaster-EventLoopGroup-11-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1449 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 381 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 1450 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9b630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1451 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706d87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1722 (HMaster-EventLoopGroup-13-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1754 (MiniHBaseClusterRegionServer-EventLoopGroup-14-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1796 (MiniHBaseClusterRegionServer-EventLoopGroup-14-2): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1813 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bb6be23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1827 (HMaster-EventLoopGroup-13-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1836 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b7b8ab7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1852 (MiniHBaseClusterRegionServer-EventLoopGroup-14-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1859 (region-location-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1861 (HMaster-EventLoopGroup-13-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1862 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 205 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Thread 1863 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27538485 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1864 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@695a71ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1873 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1975 (HMaster-EventLoopGroup-15-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1992 (MiniHBaseClusterRegionServer-EventLoopGroup-16-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2034 (MiniHBaseClusterRegionServer-EventLoopGroup-16-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2051 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e437044 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2062 (HMaster-EventLoopGroup-15-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2074 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c03c07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2091 (MiniHBaseClusterRegionServer-EventLoopGroup-16-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2098 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@274f18d2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2100 (HMaster-EventLoopGroup-15-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2101 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 201 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 2102 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1da0e8f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2103 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58a47a50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2137 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2293 (region-location-1): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2400 (Abort regionserver monitor): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2482 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2572 (ForkJoinPool-2-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2587 (HMaster-EventLoopGroup-17-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2619 (MiniHBaseClusterRegionServer-EventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2661 (MiniHBaseClusterRegionServer-EventLoopGroup-18-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2678 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64006344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2692 (HMaster-EventLoopGroup-17-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2701 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8641a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2718 (MiniHBaseClusterRegionServer-EventLoopGroup-18-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2725 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be91210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2727 (HMaster-EventLoopGroup-17-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2728 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 159 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 2729 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b746a76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2730 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c093c1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2739 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2841 (HMaster-EventLoopGroup-19-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2858 (MiniHBaseClusterRegionServer-EventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2900 (MiniHBaseClusterRegionServer-EventLoopGroup-20-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2917 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@233de56d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2931 (HMaster-EventLoopGroup-19-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2940 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871a88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2957 (MiniHBaseClusterRegionServer-EventLoopGroup-20-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2964 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3878eb34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2966 (HMaster-EventLoopGroup-19-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2967 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 2968 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dc4d1e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2969 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219e57b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3065 (HMaster-EventLoopGroup-21-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3082 (MiniHBaseClusterRegionServer-EventLoopGroup-22-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3112 (MiniHBaseClusterRegionServer-EventLoopGroup-22-2): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3132 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79954a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3142 (HMaster-EventLoopGroup-21-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3151 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 3157 (MiniHBaseClusterRegionServer-EventLoopGroup-22-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3193 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3194 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3196 (HMaster-EventLoopGroup-21-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3311 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3312 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3313 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 3310 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 3314 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 3315 (SyncThread:0): State: WAITING Blocked count: 0 Waited count: 274 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d4bb5fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 3316 (ProcessThread(sid:0 cport:49683):): State: WAITING Blocked count: 0 Waited count: 289 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3b087a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 3317 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4daf4e66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 3318 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 41 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3326 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f770010): State: TIMED_WAITING Blocked count: 1 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3327 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3328 (pool-1100-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (qtp1377509192-3329): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3330 (qtp1377509192-3330): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3331 (qtp1377509192-3331): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3332 (qtp1377509192-3332): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3333 (qtp1377509192-3333-acceptor-0@5c3eb09e-ServerConnector@78a383c6{HTTP/1.1, (http/1.1)}{localhost:41239}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3334 (qtp1377509192-3334): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3335 (qtp1377509192-3335): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3336 (qtp1377509192-3336): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3337 (Session-HouseKeeper-2ecd32c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3338 (pool-1101-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3345 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46bc032c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3347 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3348 (IPC Server idle connection scanner for port 34141): State: TIMED_WAITING Blocked count: 1 Waited count: 10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3350 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3353 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@687e5a89): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3354 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3341 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7d8449d8): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3339 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3340 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 8258 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3343 (Block report processor): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47a7a624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3349 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3346 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3356 (IPC Server handler 0 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 176 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3357 (IPC Server handler 1 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 178 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3358 (IPC Server handler 2 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3359 (IPC Server handler 3 on default port 34141): State: TIMED_WAITING Blocked count: 5 Waited count: 176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3360 (IPC Server handler 4 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 175 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3361 (pool-1106-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3363 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6f4c8732): State: TIMED_WAITING Blocked count: 0 Waited count: 43 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3364 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@3dd894f0): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3365 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@a75ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3366 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@24b24059): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3367 (CacheReplicationMonitor(677237174)): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3377 (pool-1112-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3378 (qtp1577786156-3378): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3379 (qtp1577786156-3379-acceptor-0@d163538-ServerConnector@4c8a0263{HTTP/1.1, (http/1.1)}{localhost:40451}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3380 (qtp1577786156-3380): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3381 (qtp1577786156-3381): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3382 (Session-HouseKeeper-327d7abf-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (nioEventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3384 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5aa0588f): State: TIMED_WAITING Blocked count: 2 Waited count: 168 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3386 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3387 (IPC Server idle connection scanner for port 39975): State: TIMED_WAITING Blocked count: 1 Waited count: 10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3389 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3392 (Command processor): State: WAITING Blocked count: 0 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3153d1dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3393 (BP-913626249-172.17.0.2-1732148442480 heartbeating to localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 74 Waited count: 104 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3394 (pool-1114-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3395 (IPC Client (1040632728) connection to 
localhost/127.0.0.1:34141 from jenkins): State: TIMED_WAITING Blocked count: 49 Waited count: 50 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3376 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@86acec8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3396 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3388 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3385 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3397 (IPC Server handler 0 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3398 (IPC Server handler 1 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3399 (IPC Server handler 2 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3400 (IPC Server handler 3 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3401 (IPC Server handler 4 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3406 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3407 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3412 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3413 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3417 (pool-1109-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3420 (java.util.concurrent.ThreadPoolExecutor$Worker@6d2378c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3421 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3423 (LeaseRenewer:jenkins@localhost:34141): State: TIMED_WAITING Blocked count: 2 Waited count: 88 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3429 (HMaster-EventLoopGroup-23-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3430 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3431 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c78661c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3432 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3434 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3435 (zk-event-processor-pool-0): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a467ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3436 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3437 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3438 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3439 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3440 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3441 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3442 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3443 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3444 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3445 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3446 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3447 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3448 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3450 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f5fd45d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3451 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@43e2a383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3452 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 45 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c257565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3453 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 789 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255d9f72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3454 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3455 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3456 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12690708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3457 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7580c67d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3458 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c6f71b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3459 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7235f80f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3462 (MiniHBaseClusterRegionServer-EventLoopGroup-24-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (Time-limited test-SendThread(127.0.0.1:49683)): State: 
RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3465 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28bef006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3466 (zk-event-processor-pool-0): State: WAITING Blocked count: 14 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bfe6842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3467 (Time-limited test.LruBlockCache.EvictionThread): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Thread 3469 (LruBlockCacheStatsExecutor): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3471 (MobFileCache #0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3472 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@36063768 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3473 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 30 Waited count: 37 Waiting on java.util.concurrent.Semaphore$NonfairSync@71197954 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3474 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 40 Waited count: 48 Waiting on java.util.concurrent.Semaphore$NonfairSync@2800e149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3475 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219f8229 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3476 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33745): State: WAITING Blocked count: 4 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3477 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33745): State: WAITING Blocked count: 2 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3478 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ccc525c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3479 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3369e225 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3480 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 14 Waited count: 30 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ebaa0ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3481 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62f39c0d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3460 (M:0;5ed4808ef0e6:35861): State: TIMED_WAITING Blocked 
count: 1 Waited count: 820 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:625) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3495 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 810 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3504 (MiniHBaseClusterRegionServer-EventLoopGroup-24-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3505 (DataXceiver for client DFSClient_NONMAPREDUCE_742485806_20 at /127.0.0.1:51882 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3506 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 103 
Waited count: 104 Waiting on java.util.ArrayDeque@5b538681 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3507 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData-prefix:5ed4808ef0e6,35861,1732148444638): State: WAITING Blocked count: 0 Waited count: 237 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25be6a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3510 (master:store-Flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor.flushLoop(MasterRegionFlusherAndCompactor.java:200) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor$$Lambda$479/0x00007f205c9f9bd8.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3518 (ProcedureDispatcherTimeoutThread): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread.run(RemoteProcedureDispatcher.java:328) Thread 3519 (5ed4808ef0e6:35861): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b481a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.waitOnAssignQueue(AssignmentManager.java:2390) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.processAssignQueue(AssignmentManager.java:2412) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager$1.run(AssignmentManager.java:2352) Thread 3520 (normalizer-worker-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6df0091b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue.take(RegionNormalizerWorkQueue.java:146) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker.run(RegionNormalizerWorker.java:191) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3521 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3511 (ProcExecTimeout): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:274) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3512 (WorkerMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3513 (PEWorker-1): State: TIMED_WAITING Blocked count: 46 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3514 (PEWorker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3515 (PEWorker-3): State: TIMED_WAITING Blocked count: 23 Waited count: 57 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3516 (PEWorker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) 
app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3517 (PEWorker-5): State: TIMED_WAITING Blocked count: 1 Waited count: 28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3525 (OldWALsCleaner-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b2e289 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.deleteFile(LogCleaner.java:172) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.lambda$createOldWalsCleaner$1(LogCleaner.java:152) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner$$Lambda$598/0x00007f205cacf298.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3526 (master/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3528 (snapshot-hfile-cleaner-cache-refresher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3529 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148447068): State: WAITING Blocked count: 0 Waited 
count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cebe51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$1.run(HFileCleaner.java:254) Thread 3532 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58e9f6a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$2.run(HFileCleaner.java:269) Thread 3482 (RS:0;5ed4808ef0e6:33745): State: TIMED_WAITING Blocked count: 809 Waited count: 1593 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:906) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) java.base@17.0.11/java.security.AccessController.executePrivileged(AccessController.java:776) java.base@17.0.11/java.security.AccessController.doPrivileged(AccessController.java:399) java.base@17.0.11/javax.security.auth.Subject.doAs(Subject.java:376) app//org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) app//org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3533 (BootstrapNodeManager): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3535 (HMaster-EventLoopGroup-23-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3536 (RegionServerTracker-0): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6aa13e91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3537 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins.hfs.10): State: TIMED_WAITING Blocked count: 282 Waited count: 283 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3538 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 280 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3539 (JvmPauseMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 161 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:148) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3540 (RS:0;5ed4808ef0e6:33745-longCompactions-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a8b6720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3541 (regionserver/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3543 (regionserver/5ed4808ef0e6:0.logRoller): State: TIMED_WAITING Blocked count: 0 Waited count: 798 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3545 (MemStoreFlusher.0): State: TIMED_WAITING Blocked count: 0 Waited count: 798 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:77) app//org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:323) Thread 3544 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@237b99c1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3542 (regionserver/5ed4808ef0e6:0.leaseChecker): State: TIMED_WAITING Blocked count: 0 Waited count: 798 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Thread 3548 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51900 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3549 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 6 Waited count: 7 Waiting on java.util.ArrayDeque@566951c3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3550 (LeaseRenewer:jenkins.hfs.10@localhost:34141): State: TIMED_WAITING Blocked count: 2 Waited count: 83 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3551 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73c7bfc9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3560 (MiniHBaseClusterRegionServer-EventLoopGroup-24-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3561 (RS_OPEN_META-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 30 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33092bee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3562 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51908 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3563 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 16 Waited count: 17 Waiting on java.util.ArrayDeque@4cc309df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3564 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.meta): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5038aecd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3567 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ecf6af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3568 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3569 (HMaster-EventLoopGroup-23-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3570 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3571 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@303a142a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3572 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e18a26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3580 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@2db0d51d): State: TIMED_WAITING Blocked count: 5 Waited count: 158 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3581 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3582 (pool-1206-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3583 (qtp1341535794-3583): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3584 (qtp1341535794-3584): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3585 (qtp1341535794-3585): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3586 (qtp1341535794-3586): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3587 (qtp1341535794-3587-acceptor-0@3dd8f85e-ServerConnector@5cf1b482{HTTP/1.1, (http/1.1)}{localhost:33319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3588 (qtp1341535794-3588): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3589 (qtp1341535794-3589): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3590 (qtp1341535794-3590): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3591 (Session-HouseKeeper-1a5d7d7b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3592 (pool-1207-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3599 (FSEditLogAsync): State: WAITING Blocked count: 1 Waited count: 200 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@501965e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3601 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3602 (IPC Server idle connection scanner for port 37411): State: TIMED_WAITING Blocked count: 1 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3604 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3607 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@16dcedcf): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3608 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3595 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2cab800b): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3593 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3594 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 7706 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3597 (Block report processor): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3441d181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3603 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3600 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3610 (IPC Server handler 0 on default port 37411): State: TIMED_WAITING Blocked count: 4 Waited count: 209 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3611 (IPC Server handler 1 on default port 37411): State: TIMED_WAITING Blocked count: 0 Waited count: 207 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) 
Thread 3612 (IPC Server handler 2 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 206 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3613 (IPC Server handler 3 on default port 37411): State: TIMED_WAITING Blocked count: 2 Waited count: 210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3614 (IPC Server handler 4 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 209 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3615 (pool-1212-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3617 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@45bccaf9): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3618 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@4915eba0): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3619 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@75ca663c): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3620 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@1d86b1c2): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3621 (CacheReplicationMonitor(449845788)): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3631 (pool-1218-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3632 (qtp1777667352-3632): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3633 (qtp1777667352-3633-acceptor-0@34a75aa7-ServerConnector@5b981b4e{HTTP/1.1, (http/1.1)}{localhost:43351}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3634 (qtp1777667352-3634): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3635 (qtp1777667352-3635): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3636 (Session-HouseKeeper-67fcf623-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3637 (nioEventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3638 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@64c83422): State: TIMED_WAITING Blocked count: 7 Waited count: 157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3640 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3641 (IPC Server idle connection scanner for port 41981): State: TIMED_WAITING Blocked count: 1 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3643 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3646 (Command processor): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e00196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3647 (BP-493474764-172.17.0.2-1732148448119 heartbeating to localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 96 Waited count: 121 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3648 (pool-1220-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3630 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@68bc995a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3642 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3639 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3651 (IPC Server handler 0 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3652 (IPC Server handler 1 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3653 (IPC Server handler 2 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3654 (IPC Server handler 3 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3655 (IPC Server handler 4 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3649 (IPC Client (1040632728) connection to localhost/127.0.0.1:37411 from jenkins): State: TIMED_WAITING Blocked count: 58 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3650 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3660 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3661 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3665 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3667 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3671 (pool-1215-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3675 (java.util.concurrent.ThreadPoolExecutor$Worker@f1b5be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3676 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3678 (LeaseRenewer:jenkins@localhost:37411): State: TIMED_WAITING Blocked count: 2 Waited count: 82 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3683 (HMaster-EventLoopGroup-25-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3684 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3685 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bffda6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3686 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e6696af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3688 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d2232ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3689 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@61f34110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3690 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.Semaphore$NonfairSync@348ac3f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3691 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e13e821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3692 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3693 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3694 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@795b2be5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3695 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1b2cea74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3696 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a1f5bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3697 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2095fd85 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3700 (MiniHBaseClusterRegionServer-EventLoopGroup-26-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3698 (M:0;5ed4808ef0e6:34653): State: TIMED_WAITING Blocked count: 87 Waited count: 253 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$559/0x00007f205ca8e370.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) Thread 3734 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 752 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3743 (MiniHBaseClusterRegionServer-EventLoopGroup-26-2): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.CompletableFuture$Signaller@7cf3dec4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.fetchPeerAddresses(HBaseReplicationEndpoint.java:203) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.chooseSinks(HBaseReplicationEndpoint.java:211) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.reportBadSink(HBaseReplicationEndpoint.java:257) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.onReplicateWALEntryException(HBaseInterClusterReplicationEndpoint.java:558) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.lambda$replicateEntries$2(HBaseInterClusterReplicationEndpoint.java:541) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint$$Lambda$1181/0x00007f205cd0d2d0.accept(Unknown Source) app//org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) app//org.apache.hadoop.hbase.util.FutureUtils$$Lambda$432/0x00007f205c9c66f0.accept(Unknown Source) java.base@17.0.11/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) java.base@17.0.11/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) java.base@17.0.11/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) java.base@17.0.11/java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) Thread 3744 (DataXceiver for client DFSClient_NONMAPREDUCE_-1900668902_20 at /127.0.0.1:60240 [Receiving block BP-493474764-172.17.0.2-1732148448119:blk_1073741830_1006]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3745 (PacketResponder: BP-493474764-172.17.0.2-1732148448119:blk_1073741830_1006, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 94 Waited count: 95 Waiting on java.util.ArrayDeque@73056d0a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3746 (AsyncFSWAL-0-hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData-prefix:5ed4808ef0e6,34653,1732148450571): State: WAITING Blocked count: 0 Waited count: 222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc8576d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3760 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3778 (HMaster-EventLoopGroup-25-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3779 (RegionServerTracker-0): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74ad5a4d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3787 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9804ef Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3799 (MiniHBaseClusterRegionServer-EventLoopGroup-26-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3806 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12554d2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3808 (HMaster-EventLoopGroup-25-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3809 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3810 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51c15b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3811 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fa00c5e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3824 (RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 18 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23f3d639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3843 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3844 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3859 (RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f387c04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3862 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.rep): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32c721c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3866 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 4 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@170f29bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3868 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3869 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: TIMED_WAITING Blocked count: 146 Waited count: 242 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) Thread 3870 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.CompletableFuture$Signaller@31911a48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) Thread 3964 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4108 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 4154 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:22:08,100 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1900668902_20 at /127.0.0.1:60240 [Receiving block BP-493474764-172.17.0.2-1732148448119:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38465:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60240 dst: /127.0.0.1:38465 java.net.SocketTimeoutException: 60000 millis timeout while waiting for channel to be ready for read. ch : java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38465 remote=/127.0.0.1:60240] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:163) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-21T00:22:09,287 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:22:09,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,34653,1732148450571 2024-11-21T00:22:09,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-26-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@574211fa 2024-11-21T00:22:09,493 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:22:13,105 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:22:14,216 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T00:22:16,809 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:20,140 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:22:20,622 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:24,456 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570
2024-11-21T00:22:24,541 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:28,548 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:32,672 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:36,894 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:41,197 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:44,217 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:22:45,617 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:47,180 WARN [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 9, running: 1
2024-11-21T00:22:50,140 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:22:50,143 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:54,749 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:22:57,267 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-11-21T00:22:57,269 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=9, created chunk count=18, reused chunk count=57, reuseRatio=76.00%
2024-11-21T00:22:59,453 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:23:04,267 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:23:05,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,290 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,801 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-21T00:23:05,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,852 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-21T00:23:05,853 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:23:05,871 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5ed4808ef0e6:34653 453 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 16 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@d1db0eb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 25 Waited count: 16 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a794780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3201 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 33 Waiting on java.util.concurrent.CountDownLatch$Sync@3cdcfaa7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 938 Waited count: 1769 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) app//org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:221) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) Thread 32 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@53047f3c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 34 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1301779b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 44 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 137 (ForkJoinPool-2-worker-2): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.ForkJoinPool@629adae Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 138 (ForkJoinPool-2-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 152 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@20ea4193 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 924 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48c23f15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@3bb7d7bf): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 179 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418d319b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 30788 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 285 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f55408b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 301 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12b3f654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 303 (RPCClient-NioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 304 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 305 (RPCClient-NioEventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 242 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 30608 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 306 (RPCClient-NioEventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 307 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 530 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 308 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ec9cda9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 309 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56603a02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 318 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 419 (HMaster-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (MiniHBaseClusterRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (MiniHBaseClusterRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 496 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53b4795d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (HMaster-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 519 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e383429 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) 
Thread 535 (MiniHBaseClusterRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e2c1ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-4-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (HMaster-EventLoopGroup-5-3): 
State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-4-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-4-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 531 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 549 
(Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39cb992f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 550 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49c561da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-4-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (RPCClient-NioEventLoopGroup-4-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (RPCClient-NioEventLoopGroup-4-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (RPCClient-NioEventLoopGroup-4-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (RPCClient-NioEventLoopGroup-4-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (RPCClient-NioEventLoopGroup-4-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (RPCClient-NioEventLoopGroup-4-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 678 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 695 (MiniHBaseClusterRegionServer-EventLoopGroup-8-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 724 (MiniHBaseClusterRegionServer-EventLoopGroup-8-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 744 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56c39db7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 754 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 763 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 769 (MiniHBaseClusterRegionServer-EventLoopGroup-8-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 825 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 826 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 828 (RPCClient-NioEventLoopGroup-4-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 829 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 830 (RPCClient-NioEventLoopGroup-4-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 831 (RPCClient-NioEventLoopGroup-4-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 967 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1070 (HMaster-EventLoopGroup-9-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1102 (MiniHBaseClusterRegionServer-EventLoopGroup-10-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (MiniHBaseClusterRegionServer-EventLoopGroup-10-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1161 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d70bb7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1175 (HMaster-EventLoopGroup-9-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f59c6ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1200 (MiniHBaseClusterRegionServer-EventLoopGroup-10-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@153dd6c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (HMaster-EventLoopGroup-9-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 483 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 1211 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d5159c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1212 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fc6e35a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1222 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1324 (HMaster-EventLoopGroup-11-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1341 (MiniHBaseClusterRegionServer-EventLoopGroup-12-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1383 (MiniHBaseClusterRegionServer-EventLoopGroup-12-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1400 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ebb900e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1416 (HMaster-EventLoopGroup-11-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1427 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@254bded4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1439 (MiniHBaseClusterRegionServer-EventLoopGroup-12-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1446 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a6d5614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1448 (HMaster-EventLoopGroup-11-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1449 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 494 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 1450 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9b630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1451 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706d87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 1617 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1722 (HMaster-EventLoopGroup-13-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1754 (MiniHBaseClusterRegionServer-EventLoopGroup-14-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1796 (MiniHBaseClusterRegionServer-EventLoopGroup-14-2): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1813 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bb6be23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1827 (HMaster-EventLoopGroup-13-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1836 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b7b8ab7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1852 (MiniHBaseClusterRegionServer-EventLoopGroup-14-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1859 (region-location-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1861 (HMaster-EventLoopGroup-13-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1862 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 317 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 1863 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27538485 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1864 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@695a71ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1873 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1975 (HMaster-EventLoopGroup-15-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1992 (MiniHBaseClusterRegionServer-EventLoopGroup-16-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2034 (MiniHBaseClusterRegionServer-EventLoopGroup-16-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2051 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e437044 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2062 (HMaster-EventLoopGroup-15-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2074 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c03c07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2091 (MiniHBaseClusterRegionServer-EventLoopGroup-16-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2098 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@274f18d2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2100 (HMaster-EventLoopGroup-15-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2101 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 2102 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1da0e8f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2103 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58a47a50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2137 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2293 (region-location-1): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2400 (Abort regionserver monitor): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2482 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 35 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2587 (HMaster-EventLoopGroup-17-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2619 (MiniHBaseClusterRegionServer-EventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2661 (MiniHBaseClusterRegionServer-EventLoopGroup-18-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2678 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64006344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2692 (HMaster-EventLoopGroup-17-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2701 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8641a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2718 (MiniHBaseClusterRegionServer-EventLoopGroup-18-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2725 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be91210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2727 (HMaster-EventLoopGroup-17-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2728 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 266 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 2729 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b746a76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2730 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c093c1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2739 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2841 (HMaster-EventLoopGroup-19-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2858 (MiniHBaseClusterRegionServer-EventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2900 (MiniHBaseClusterRegionServer-EventLoopGroup-20-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2917 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@233de56d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2931 (HMaster-EventLoopGroup-19-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2940 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871a88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2957 (MiniHBaseClusterRegionServer-EventLoopGroup-20-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2964 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3878eb34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2966 (HMaster-EventLoopGroup-19-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2967 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 267 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 2968 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dc4d1e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2969 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219e57b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 3065 (HMaster-EventLoopGroup-21-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3082 (MiniHBaseClusterRegionServer-EventLoopGroup-22-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3112 (MiniHBaseClusterRegionServer-EventLoopGroup-22-2): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3132 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79954a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3142 (HMaster-EventLoopGroup-21-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3151 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 3157 (MiniHBaseClusterRegionServer-EventLoopGroup-22-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3193 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3194 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3196 (HMaster-EventLoopGroup-21-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3311 (NIOServerCxnFactory.SelectorThread-0): State: 
RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3312 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3313 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 3310 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 3314 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 3315 (SyncThread:0): State: WAITING Blocked count: 0 Waited count: 296 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d4bb5fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 3316 (ProcessThread(sid:0 cport:49683):): State: WAITING Blocked count: 0 Waited count: 311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3b087a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 3317 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 318 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4daf4e66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 3318 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3326 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f770010): State: TIMED_WAITING Blocked count: 1 Waited count: 290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3327 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3328 (pool-1100-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (qtp1377509192-3329): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3330 (qtp1377509192-3330): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3331 (qtp1377509192-3331): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3332 (qtp1377509192-3332): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3333 (qtp1377509192-3333-acceptor-0@5c3eb09e-ServerConnector@78a383c6{HTTP/1.1, (http/1.1)}{localhost:41239}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3334 (qtp1377509192-3334): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3335 (qtp1377509192-3335): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3336 (qtp1377509192-3336): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3337 (Session-HouseKeeper-2ecd32c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3338 
(pool-1101-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3345 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46bc032c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3347 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3348 (IPC Server idle connection scanner for port 34141): State: TIMED_WAITING Blocked count: 1 Waited count: 16 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3350 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3353 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@687e5a89): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3354 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3341 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7d8449d8): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3339 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 49 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3340 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 14136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3343 (Block report processor): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47a7a624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3349 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3346 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3356 (IPC Server handler 0 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 239 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3357 (IPC Server handler 1 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 241 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3358 (IPC Server handler 2 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 239 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3359 (IPC Server handler 3 on default port 34141): State: TIMED_WAITING Blocked count: 5 Waited count: 239 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3360 (IPC Server handler 4 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 237 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3361 (pool-1106-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3363 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6f4c8732): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3364 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@3dd894f0): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3365 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@a75ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3366 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@24b24059): 
State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3367 (CacheReplicationMonitor(677237174)): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3377 (pool-1112-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3378 (qtp1577786156-3378): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3379 (qtp1577786156-3379-acceptor-0@d163538-ServerConnector@4c8a0263{HTTP/1.1, (http/1.1)}{localhost:40451}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3380 (qtp1577786156-3380): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3381 (qtp1577786156-3381): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3382 (Session-HouseKeeper-327d7abf-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (nioEventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3384 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5aa0588f): State: TIMED_WAITING Blocked count: 2 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3386 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3387 (IPC Server idle connection scanner for port 39975): State: TIMED_WAITING Blocked count: 1 Waited count: 16 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3389 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3392 (Command processor): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3153d1dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3393 (BP-913626249-172.17.0.2-1732148442480 heartbeating to localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 94 Waited count: 144 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3394 (pool-1114-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3395 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins): State: TIMED_WAITING Blocked count: 69 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3376 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@86acec8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3396 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3388 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3385 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3397 (IPC Server handler 0 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3398 (IPC Server handler 1 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3399 (IPC Server handler 2 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3400 (IPC Server handler 3 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3401 (IPC Server handler 4 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 147 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3406 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3407 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3412 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3413 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3417 (pool-1109-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3420 (java.util.concurrent.ThreadPoolExecutor$Worker@6d2378c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3421 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3423 (LeaseRenewer:jenkins@localhost:34141): State: TIMED_WAITING Blocked count: 4 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3429 (HMaster-EventLoopGroup-23-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3430 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3431 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c78661c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3432 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3434 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3435 (zk-event-processor-pool-0): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a467ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3436 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3437 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3438 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 44 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3439 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3440 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3441 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 43 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3442 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3443 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3444 
(NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3445 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3446 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3447 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3448 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 43 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3450 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f5fd45d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3451 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@43e2a383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3452 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c257565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3453 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1373 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255d9f72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3454 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3455 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3456 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12690708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3457 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7580c67d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3458 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c6f71b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3459 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7235f80f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3462 (MiniHBaseClusterRegionServer-EventLoopGroup-24-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3465 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28bef006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3466 (zk-event-processor-pool-0): State: WAITING Blocked count: 14 Waited count: 24 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bfe6842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3467 (Time-limited test.LruBlockCache.EvictionThread): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Thread 3469 (LruBlockCacheStatsExecutor): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3471 (MobFileCache #0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3472 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@36063768 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3473 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 30 Waited count: 37 Waiting on java.util.concurrent.Semaphore$NonfairSync@71197954 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3474 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 40 Waited count: 48 Waiting on java.util.concurrent.Semaphore$NonfairSync@2800e149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3475 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219f8229 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3476 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33745): State: WAITING Blocked count: 4 Waited count: 15 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3477 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33745): State: WAITING Blocked count: 2 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3478 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ccc525c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3479 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3369e225 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3480 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 14 Waited count: 30 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ebaa0ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3481 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62f39c0d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3460 (M:0;5ed4808ef0e6:35861): State: TIMED_WAITING Blocked count: 1 Waited count: 1420 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:625) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3495 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 1409 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3504 (MiniHBaseClusterRegionServer-EventLoopGroup-24-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3505 (DataXceiver for client DFSClient_NONMAPREDUCE_742485806_20 at /127.0.0.1:51882 [Receiving block 
BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3506 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 105 Waited count: 106 Waiting on java.util.ArrayDeque@5b538681 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3507 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData-prefix:5ed4808ef0e6,35861,1732148444638): State: WAITING Blocked count: 0 Waited count: 237 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25be6a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3510 (master:store-Flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor.flushLoop(MasterRegionFlusherAndCompactor.java:200) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor$$Lambda$479/0x00007f205c9f9bd8.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3518 (ProcedureDispatcherTimeoutThread): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread.run(RemoteProcedureDispatcher.java:328) Thread 3519 (5ed4808ef0e6:35861): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b481a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.waitOnAssignQueue(AssignmentManager.java:2390) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.processAssignQueue(AssignmentManager.java:2412) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager$1.run(AssignmentManager.java:2352) Thread 3520 (normalizer-worker-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6df0091b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue.take(RegionNormalizerWorkQueue.java:146) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker.run(RegionNormalizerWorker.java:191) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3521 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3511 (ProcExecTimeout): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:274) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3512 (WorkerMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3513 (PEWorker-1): State: TIMED_WAITING Blocked count: 46 Waited count: 101 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) 
app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3514 (PEWorker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3515 (PEWorker-3): State: TIMED_WAITING Blocked count: 23 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3516 (PEWorker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3517 (PEWorker-5): State: TIMED_WAITING Blocked count: 1 Waited count: 29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3525 (OldWALsCleaner-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b2e289 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.deleteFile(LogCleaner.java:172) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.lambda$createOldWalsCleaner$1(LogCleaner.java:152) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner$$Lambda$598/0x00007f205cacf298.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3526 (master/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3528 (snapshot-hfile-cleaner-cache-refresher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3529 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cebe51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$1.run(HFileCleaner.java:254) Thread 3532 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58e9f6a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$2.run(HFileCleaner.java:269) Thread 3482 (RS:0;5ed4808ef0e6:33745): State: TIMED_WAITING Blocked count: 1392 Waited count: 2760 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:906) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) java.base@17.0.11/java.security.AccessController.executePrivileged(AccessController.java:776) java.base@17.0.11/java.security.AccessController.doPrivileged(AccessController.java:399) java.base@17.0.11/javax.security.auth.Subject.doAs(Subject.java:376) app//org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) app//org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3533 (BootstrapNodeManager): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3535 (HMaster-EventLoopGroup-23-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3536 (RegionServerTracker-0): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6aa13e91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3537 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins.hfs.10): State: TIMED_WAITING Blocked count: 312 Waited count: 313 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3538 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 310 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3539 (JvmPauseMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 281 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:148) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3540 (RS:0;5ed4808ef0e6:33745-longCompactions-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a8b6720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3541 (regionserver/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 258 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3543 (regionserver/5ed4808ef0e6:0.logRoller): State: TIMED_WAITING Blocked count: 0 Waited count: 1398 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3545 (MemStoreFlusher.0): State: TIMED_WAITING Blocked count: 0 Waited count: 1398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:77) app//org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:323) Thread 3544 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@237b99c1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3542 (regionserver/5ed4808ef0e6:0.leaseChecker): State: TIMED_WAITING Blocked 
count: 0 Waited count: 1398 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Thread 3548 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51900 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3549 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 8 Waited count: 9 Waiting on java.util.ArrayDeque@566951c3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3550 (LeaseRenewer:jenkins.hfs.10@localhost:34141): State: TIMED_WAITING Blocked count: 4 Waited count: 145 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3551 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73c7bfc9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3560 (MiniHBaseClusterRegionServer-EventLoopGroup-24-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3561 (RS_OPEN_META-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33092bee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3562 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51908 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3563 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 18 Waited count: 19 Waiting on java.util.ArrayDeque@4cc309df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3564 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.meta): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5038aecd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3567 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ecf6af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3568 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3569 (HMaster-EventLoopGroup-23-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3570 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3571 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@303a142a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3572 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e18a26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3580 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2db0d51d): State: TIMED_WAITING Blocked count: 5 Waited count: 278 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3581 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3582 (pool-1206-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3583 (qtp1341535794-3583): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3584 (qtp1341535794-3584): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3585 (qtp1341535794-3585): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3586 (qtp1341535794-3586): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3587 (qtp1341535794-3587-acceptor-0@3dd8f85e-ServerConnector@5cf1b482{HTTP/1.1, (http/1.1)}{localhost:33319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3588 (qtp1341535794-3588): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3589 (qtp1341535794-3589): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3590 (qtp1341535794-3590): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3591 (Session-HouseKeeper-1a5d7d7b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3592 (pool-1207-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3599 (FSEditLogAsync): State: WAITING Blocked count: 1 Waited count: 200 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@501965e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3601 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3602 (IPC Server idle connection scanner for port 37411): State: TIMED_WAITING Blocked count: 1 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3604 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3607 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@16dcedcf): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3608 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3595 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2cab800b): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3593 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3594 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 13580 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3597 (Block report processor): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3441d181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3603 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited 
count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3600 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3610 (IPC Server handler 0 on default port 37411): State: TIMED_WAITING Blocked count: 4 Waited count: 269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3611 (IPC Server handler 1 on default port 37411): State: TIMED_WAITING Blocked count: 0 Waited count: 267 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3612 (IPC Server handler 2 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 266 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3613 (IPC Server handler 3 on default port 37411): State: TIMED_WAITING Blocked count: 2 Waited count: 270 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3614 (IPC Server handler 4 on default port 37411): State: TIMED_WAITING 
Blocked count: 6 Waited count: 269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3615 (pool-1212-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3617 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@45bccaf9): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3618 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@4915eba0): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3619 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@75ca663c): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3620 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@1d86b1c2): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3621 (CacheReplicationMonitor(449845788)): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) 
Thread 3631 (pool-1218-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3632 (qtp1777667352-3632): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3633 (qtp1777667352-3633-acceptor-0@34a75aa7-ServerConnector@5b981b4e{HTTP/1.1, (http/1.1)}{localhost:43351}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3634 (qtp1777667352-3634): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3635 (qtp1777667352-3635): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3636 (Session-HouseKeeper-67fcf623-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3637 (nioEventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3638 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@64c83422): State: TIMED_WAITING Blocked count: 7 Waited count: 277 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3640 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3641 (IPC Server idle connection scanner for port 41981): State: TIMED_WAITING Blocked count: 1 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3643 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3646 (Command processor): State: WAITING Blocked count: 0 Waited count: 48 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e00196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3647 (BP-493474764-172.17.0.2-1732148448119 heartbeating to localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 116 Waited count: 161 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3648 (pool-1220-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3630 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@68bc995a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3642 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3639 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3651 (IPC Server handler 0 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3652 (IPC Server handler 1 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3653 (IPC Server handler 2 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3654 (IPC Server handler 3 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3655 (IPC Server handler 4 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3649 (IPC Client (1040632728) connection to localhost/127.0.0.1:37411 from jenkins): State: TIMED_WAITING Blocked count: 78 Waited count: 79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3650 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 79 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3660 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 
Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3661 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3665 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3667 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3671 (pool-1215-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3675 (java.util.concurrent.ThreadPoolExecutor$Worker@f1b5be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3676 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3678 (LeaseRenewer:jenkins@localhost:37411): State: TIMED_WAITING Blocked count: 4 Waited count: 146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3683 (HMaster-EventLoopGroup-25-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3684 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3685 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bffda6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3686 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 31 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e6696af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3688 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d2232ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3689 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61f34110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3690 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.Semaphore$NonfairSync@348ac3f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3691 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e13e821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3692 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3693 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3694 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@795b2be5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3695 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1b2cea74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3696 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a1f5bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3697 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2095fd85 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3700 (MiniHBaseClusterRegionServer-EventLoopGroup-26-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3698 (M:0;5ed4808ef0e6:34653): State: TIMED_WAITING Blocked count: 87 Waited count: 253 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$559/0x00007f205ca8e370.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) Thread 3734 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 1351 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3743 (MiniHBaseClusterRegionServer-EventLoopGroup-26-2): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.CompletableFuture$Signaller@7cf3dec4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) 
java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.fetchPeerAddresses(HBaseReplicationEndpoint.java:203) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.chooseSinks(HBaseReplicationEndpoint.java:211) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.reportBadSink(HBaseReplicationEndpoint.java:257) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.onReplicateWALEntryException(HBaseInterClusterReplicationEndpoint.java:558) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.lambda$replicateEntries$2(HBaseInterClusterReplicationEndpoint.java:541) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint$$Lambda$1181/0x00007f205cd0d2d0.accept(Unknown Source) app//org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) app//org.apache.hadoop.hbase.util.FutureUtils$$Lambda$432/0x00007f205c9c66f0.accept(Unknown Source) java.base@17.0.11/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) java.base@17.0.11/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) java.base@17.0.11/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) java.base@17.0.11/java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) Thread 3746 (AsyncFSWAL-0-hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData-prefix:5ed4808ef0e6,34653,1732148450571): State: WAITING Blocked count: 0 Waited count: 222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc8576d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3760 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3778 (HMaster-EventLoopGroup-25-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3779 (RegionServerTracker-0): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74ad5a4d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3787 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9804ef Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3799 (MiniHBaseClusterRegionServer-EventLoopGroup-26-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3806 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12554d2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3808 (HMaster-EventLoopGroup-25-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3809 (Time-limited test-SendThread(127.0.0.1:49683)): State: 
RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3810 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51c15b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3811 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fa00c5e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3824 (RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 18 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23f3d639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3843 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3844 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3859 (RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f387c04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3862 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.rep): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32c721c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3866 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 4 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@170f29bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3868 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3869 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: TIMED_WAITING Blocked count: 187 Waited count: 311 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) Thread 3870 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.CompletableFuture$Signaller@31911a48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) 
java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) Thread 3964 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4191 (RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76e4d6fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4195 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4202 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-21T00:23:09,170 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] 
regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:09,457 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:23:14,183 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:14,217 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:23:19,303 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:20,140 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T00:23:24,528 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:29,839 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:35,245 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:40,753 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:44,217 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:23:46,357 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:50,140 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-21T00:23:52,061 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:23:54,457 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:23:57,865 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:24:03,773 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5ed4808ef0e6:34653 451 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) 
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@d1db0eb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 26 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a794780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3799 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 39 Waiting on java.util.concurrent.CountDownLatch$Sync@5d70508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 938 Waited count: 1770 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) 
app//org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) app//org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:221) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) Thread 32 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@53047f3c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 34 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1301779b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 44 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 137 (ForkJoinPool-2-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 152 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@20ea4193 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48c23f15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 38 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@3bb7d7bf): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 179 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418d319b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 36795 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 
(HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 285 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f55408b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 301 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12b3f654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 303 (RPCClient-NioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 304 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 305 (RPCClient-NioEventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 242 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 36613 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 306 (RPCClient-NioEventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 307 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 308 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ec9cda9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 309 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56603a02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 318 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 419 (HMaster-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (MiniHBaseClusterRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (MiniHBaseClusterRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 496 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53b4795d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (HMaster-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 519 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e383429 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 535 (MiniHBaseClusterRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e2c1ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-4-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (HMaster-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-4-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-4-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 641 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 549 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39cb992f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 550 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49c561da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-4-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (RPCClient-NioEventLoopGroup-4-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (RPCClient-NioEventLoopGroup-4-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (RPCClient-NioEventLoopGroup-4-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (RPCClient-NioEventLoopGroup-4-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (RPCClient-NioEventLoopGroup-4-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (RPCClient-NioEventLoopGroup-4-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 678 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 695 (MiniHBaseClusterRegionServer-EventLoopGroup-8-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 724 (MiniHBaseClusterRegionServer-EventLoopGroup-8-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 744 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56c39db7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 754 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 763 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 769 (MiniHBaseClusterRegionServer-EventLoopGroup-8-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 825 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 826 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 828 (RPCClient-NioEventLoopGroup-4-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 829 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 830 (RPCClient-NioEventLoopGroup-4-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 831 (RPCClient-NioEventLoopGroup-4-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 967 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1070 (HMaster-EventLoopGroup-9-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1102 (MiniHBaseClusterRegionServer-EventLoopGroup-10-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (MiniHBaseClusterRegionServer-EventLoopGroup-10-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1161 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d70bb7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1175 (HMaster-EventLoopGroup-9-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f59c6ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1200 (MiniHBaseClusterRegionServer-EventLoopGroup-10-3): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@153dd6c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (HMaster-EventLoopGroup-9-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 593 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) 
app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 1211 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d5159c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1212 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fc6e35a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1222 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1324 (HMaster-EventLoopGroup-11-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1341 
(MiniHBaseClusterRegionServer-EventLoopGroup-12-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1383 (MiniHBaseClusterRegionServer-EventLoopGroup-12-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1400 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ebb900e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1416 (HMaster-EventLoopGroup-11-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1427 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@254bded4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1439 (MiniHBaseClusterRegionServer-EventLoopGroup-12-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1446 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a6d5614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1448 (HMaster-EventLoopGroup-11-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1449 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 600 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 1450 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9b630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1451 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706d87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1722 (HMaster-EventLoopGroup-13-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1754 (MiniHBaseClusterRegionServer-EventLoopGroup-14-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1796 (MiniHBaseClusterRegionServer-EventLoopGroup-14-2): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1813 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bb6be23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1827 (HMaster-EventLoopGroup-13-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1836 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b7b8ab7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1852 (MiniHBaseClusterRegionServer-EventLoopGroup-14-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1859 (region-location-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1861 (HMaster-EventLoopGroup-13-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1862 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 423 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 1863 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27538485 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1864 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@695a71ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1873 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1975 (HMaster-EventLoopGroup-15-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1992 (MiniHBaseClusterRegionServer-EventLoopGroup-16-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2034 (MiniHBaseClusterRegionServer-EventLoopGroup-16-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2051 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e437044 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2062 (HMaster-EventLoopGroup-15-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2074 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c03c07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2091 (MiniHBaseClusterRegionServer-EventLoopGroup-16-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2098 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@274f18d2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2100 (HMaster-EventLoopGroup-15-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2101 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 425 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 2102 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1da0e8f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2103 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited 
count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58a47a50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2137 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2293 (region-location-1): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2400 (Abort regionserver monitor): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native 
Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2482 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 47 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2587 (HMaster-EventLoopGroup-17-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2619 (MiniHBaseClusterRegionServer-EventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2661 (MiniHBaseClusterRegionServer-EventLoopGroup-18-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2678 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64006344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2692 (HMaster-EventLoopGroup-17-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2701 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8641a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2718 (MiniHBaseClusterRegionServer-EventLoopGroup-18-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2725 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be91210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2727 (HMaster-EventLoopGroup-17-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2728 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 376 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) 
app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Thread 2729 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b746a76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2730 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c093c1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2739 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2841 (HMaster-EventLoopGroup-19-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2858 (MiniHBaseClusterRegionServer-EventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2900 (MiniHBaseClusterRegionServer-EventLoopGroup-20-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2917 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@233de56d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2931 (HMaster-EventLoopGroup-19-2): State: RUNNABLE Blocked count: 0 Waited count: 
0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2940 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871a88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2957 (MiniHBaseClusterRegionServer-EventLoopGroup-20-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2964 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3878eb34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2966 (HMaster-EventLoopGroup-19-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2967 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 386 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 2968 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dc4d1e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2969 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219e57b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3065 (HMaster-EventLoopGroup-21-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3082 (MiniHBaseClusterRegionServer-EventLoopGroup-22-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3112 (MiniHBaseClusterRegionServer-EventLoopGroup-22-2): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3132 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79954a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3142 (HMaster-EventLoopGroup-21-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3151 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 3157 (MiniHBaseClusterRegionServer-EventLoopGroup-22-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3193 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3194 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3196 (HMaster-EventLoopGroup-21-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3311 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3312 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3313 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 3310 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 3314 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 3315 (SyncThread:0): State: WAITING Blocked count: 0 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d4bb5fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 3316 (ProcessThread(sid:0 cport:49683):): State: WAITING Blocked count: 0 Waited count: 334 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3b087a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 3317 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 341 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4daf4e66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 3318 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3326 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f770010): State: TIMED_WAITING Blocked count: 1 Waited count: 410 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3327 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3328 (pool-1100-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (qtp1377509192-3329): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3330 (qtp1377509192-3330): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3331 (qtp1377509192-3331): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3332 (qtp1377509192-3332): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3333 (qtp1377509192-3333-acceptor-0@5c3eb09e-ServerConnector@78a383c6{HTTP/1.1, (http/1.1)}{localhost:41239}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3334 (qtp1377509192-3334): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3335 (qtp1377509192-3335): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3336 (qtp1377509192-3336): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3337 (Session-HouseKeeper-2ecd32c0-1): State: TIMED_WAITING Blocked count: 0 
Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3338 (pool-1101-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3345 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46bc032c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3347 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3348 
(IPC Server idle connection scanner for port 34141): State: TIMED_WAITING Blocked count: 1 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3350 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3353 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@687e5a89): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3354 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3341 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7d8449d8): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3339 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 3340 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 19987 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3343 (Block report processor): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47a7a624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3349 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3346 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3356 (IPC Server handler 0 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 301 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3357 (IPC Server handler 1 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 303 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3358 (IPC Server handler 2 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 301 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3359 (IPC Server handler 3 on default port 34141): State: TIMED_WAITING Blocked count: 5 Waited count: 302 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3360 (IPC Server handler 4 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 300 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3361 (pool-1106-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3363 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6f4c8732): State: TIMED_WAITING Blocked count: 0 Waited count: 103 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3364 
(org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@3dd894f0): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3365 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@a75ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3366 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@24b24059): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3367 (CacheReplicationMonitor(677237174)): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3377 (pool-1112-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3378 (qtp1577786156-3378): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3379 (qtp1577786156-3379-acceptor-0@d163538-ServerConnector@4c8a0263{HTTP/1.1, (http/1.1)}{localhost:40451}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3380 (qtp1577786156-3380): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3381 (qtp1577786156-3381): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3382 (Session-HouseKeeper-327d7abf-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (nioEventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3384 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5aa0588f): State: TIMED_WAITING Blocked count: 2 Waited count: 409 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3386 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3387 (IPC Server idle connection scanner for port 39975): State: TIMED_WAITING Blocked count: 1 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3389 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3392 (Command processor): State: WAITING Blocked count: 0 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3153d1dd Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3393 (BP-913626249-172.17.0.2-1732148442480 heartbeating to localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 114 Waited count: 184 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3394 (pool-1114-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3395 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins): State: TIMED_WAITING Blocked count: 89 Waited count: 90 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3376 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@86acec8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3396 (IPC 
Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 90 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3388 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3385 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3397 (IPC Server handler 0 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3398 (IPC Server handler 1 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 209 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3399 (IPC Server handler 2 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3400 (IPC Server handler 3 on 
default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3401 (IPC Server handler 4 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3406 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3407 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3412 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3413 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3417 (pool-1109-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3420 (java.util.concurrent.ThreadPoolExecutor$Worker@6d2378c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3421 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3423 (LeaseRenewer:jenkins@localhost:34141): State: TIMED_WAITING Blocked count: 6 Waited count: 216 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3429 (HMaster-EventLoopGroup-23-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3430 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3431 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c78661c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3432 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3434 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3435 (zk-event-processor-pool-0): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a467ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3436 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3437 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3438 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3439 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3440 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3441 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3442 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3443 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3444 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3445 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3446 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3447 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3448 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 46 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3450 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f5fd45d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3451 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@43e2a383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3452 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 48 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c257565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3453 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1962 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255d9f72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3454 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3455 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3456 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12690708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3457 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7580c67d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3458 
(RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c6f71b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3459 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7235f80f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3462 (MiniHBaseClusterRegionServer-EventLoopGroup-24-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3465 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28bef006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3466 (zk-event-processor-pool-0): State: WAITING Blocked count: 14 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bfe6842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3467 (Time-limited test.LruBlockCache.EvictionThread): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Thread 3469 (LruBlockCacheStatsExecutor): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3471 (MobFileCache #0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3472 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@36063768 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3473 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 30 Waited count: 37 Waiting on java.util.concurrent.Semaphore$NonfairSync@71197954 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3474 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 40 Waited count: 48 Waiting on java.util.concurrent.Semaphore$NonfairSync@2800e149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3475 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219f8229 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3476 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33745): State: WAITING Blocked count: 4 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3477 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33745): State: WAITING Blocked count: 2 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3478 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ccc525c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3479 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3369e225 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3480 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 14 Waited count: 30 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ebaa0ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3481 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62f39c0d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3460 (M:0;5ed4808ef0e6:35861): State: TIMED_WAITING Blocked count: 1 Waited count: 2020 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:625) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3495 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 2010 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3504 (MiniHBaseClusterRegionServer-EventLoopGroup-24-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3505 (DataXceiver for client DFSClient_NONMAPREDUCE_742485806_20 at /127.0.0.1:51882 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3506 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 107 Waited count: 108 Waiting on java.util.ArrayDeque@5b538681 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3507 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData-prefix:5ed4808ef0e6,35861,1732148444638): State: WAITING Blocked count: 0 Waited count: 237 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25be6a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3510 (master:store-Flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor.flushLoop(MasterRegionFlusherAndCompactor.java:200) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor$$Lambda$479/0x00007f205c9f9bd8.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3518 (ProcedureDispatcherTimeoutThread): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread.run(RemoteProcedureDispatcher.java:328) Thread 3519 (5ed4808ef0e6:35861): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b481a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.waitOnAssignQueue(AssignmentManager.java:2390) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.processAssignQueue(AssignmentManager.java:2412) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager$1.run(AssignmentManager.java:2352) Thread 3520 (normalizer-worker-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6df0091b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue.take(RegionNormalizerWorkQueue.java:146) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker.run(RegionNormalizerWorker.java:191) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3521 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3511 (ProcExecTimeout): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:274) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3512 (WorkerMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 42 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) 
app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3513 (PEWorker-1): State: TIMED_WAITING Blocked count: 46 Waited count: 102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3514 (PEWorker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3515 (PEWorker-3): State: TIMED_WAITING Blocked count: 23 Waited count: 59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3516 (PEWorker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3517 (PEWorker-5): State: TIMED_WAITING Blocked count: 1 Waited count: 30 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) 
app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3525 (OldWALsCleaner-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b2e289 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.deleteFile(LogCleaner.java:172) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.lambda$createOldWalsCleaner$1(LogCleaner.java:152) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner$$Lambda$598/0x00007f205cacf298.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3526 (master/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3528 (snapshot-hfile-cleaner-cache-refresher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3529 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cebe51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) 
app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$1.run(HFileCleaner.java:254) Thread 3532 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58e9f6a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$2.run(HFileCleaner.java:269) Thread 3482 (RS:0;5ed4808ef0e6:33745): State: TIMED_WAITING Blocked count: 1980 Waited count: 3937 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:906) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) java.base@17.0.11/java.security.AccessController.executePrivileged(AccessController.java:776) java.base@17.0.11/java.security.AccessController.doPrivileged(AccessController.java:399) java.base@17.0.11/javax.security.auth.Subject.doAs(Subject.java:376) app//org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) app//org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3533 (BootstrapNodeManager): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3535 (HMaster-EventLoopGroup-23-2): 
State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3536 (RegionServerTracker-0): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6aa13e91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3537 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins.hfs.10): State: TIMED_WAITING Blocked count: 336 Waited count: 337 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3538 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3539 (JvmPauseMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 401 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:148) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3540 (RS:0;5ed4808ef0e6:33745-longCompactions-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting 
on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a8b6720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3541 (regionserver/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 351 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3543 (regionserver/5ed4808ef0e6:0.logRoller): State: TIMED_WAITING Blocked count: 0 Waited count: 1999 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3545 (MemStoreFlusher.0): State: TIMED_WAITING Blocked count: 0 Waited count: 1999 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:77) app//org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:323) Thread 3544 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@237b99c1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3542 (regionserver/5ed4808ef0e6:0.leaseChecker): State: TIMED_WAITING Blocked count: 0 Waited count: 1999 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Thread 3548 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51900 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3549 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.util.ArrayDeque@566951c3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3550 (LeaseRenewer:jenkins.hfs.10@localhost:34141): State: TIMED_WAITING Blocked count: 6 Waited count: 207 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3551 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73c7bfc9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3560 (MiniHBaseClusterRegionServer-EventLoopGroup-24-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3561 (RS_OPEN_META-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33092bee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3562 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51908 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3563 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 20 Waited count: 21 Waiting on java.util.ArrayDeque@4cc309df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3564 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.meta): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5038aecd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3567 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ecf6af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3568 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3569 (HMaster-EventLoopGroup-23-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3570 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3571 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@303a142a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3572 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e18a26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3580 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2db0d51d): State: TIMED_WAITING Blocked count: 5 Waited count: 398 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3581 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3582 (pool-1206-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3583 (qtp1341535794-3583): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3584 (qtp1341535794-3584): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3585 (qtp1341535794-3585): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3586 (qtp1341535794-3586): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3587 (qtp1341535794-3587-acceptor-0@3dd8f85e-ServerConnector@5cf1b482{HTTP/1.1, (http/1.1)}{localhost:33319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3588 (qtp1341535794-3588): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3589 (qtp1341535794-3589): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3590 (qtp1341535794-3590): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3591 (Session-HouseKeeper-1a5d7d7b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3592 (pool-1207-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3599 (FSEditLogAsync): State: WAITING Blocked count: 1 Waited count: 200 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@501965e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3601 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3602 (IPC Server idle connection scanner for port 37411): State: TIMED_WAITING Blocked count: 1 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3604 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3607 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@16dcedcf): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3608 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3595 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2cab800b): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3593 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3594 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 19451 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3597 (Block report processor): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3441d181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3603 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3600 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3610 (IPC Server handler 0 on default port 37411): State: TIMED_WAITING Blocked count: 4 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3611 (IPC Server handler 1 on default port 37411): State: TIMED_WAITING Blocked count: 0 Waited count: 328 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3612 (IPC Server handler 2 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 326 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3613 (IPC Server handler 3 on default port 37411): State: TIMED_WAITING Blocked count: 2 Waited count: 330 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3614 (IPC Server handler 4 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3615 (pool-1212-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3617 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@45bccaf9): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3618 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@4915eba0): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3619 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@75ca663c): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3620 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@1d86b1c2): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3621 (CacheReplicationMonitor(449845788)): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3631 (pool-1218-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3632 (qtp1777667352-3632): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3633 (qtp1777667352-3633-acceptor-0@34a75aa7-ServerConnector@5b981b4e{HTTP/1.1, (http/1.1)}{localhost:43351}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3634 (qtp1777667352-3634): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3635 (qtp1777667352-3635): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3636 (Session-HouseKeeper-67fcf623-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3637 (nioEventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3638 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@64c83422): State: TIMED_WAITING Blocked count: 7 Waited count: 397 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3640 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3641 (IPC Server idle connection scanner for port 41981): State: TIMED_WAITING Blocked count: 1 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3643 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3646 (Command processor): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e00196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3647 (BP-493474764-172.17.0.2-1732148448119 
heartbeating to localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 136 Waited count: 201 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3648 (pool-1220-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3630 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@68bc995a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3642 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3639 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3651 (IPC Server handler 0 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 200 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3652 (IPC Server handler 1 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 200 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3653 (IPC Server handler 2 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3654 (IPC Server handler 3 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 200 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3655 (IPC Server handler 4 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 202 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3649 (IPC Client (1040632728) connection to localhost/127.0.0.1:37411 from jenkins): State: TIMED_WAITING Blocked count: 98 Waited count: 99 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3650 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 99 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3660 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3661 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3665 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3667 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3671 (pool-1215-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3675 (java.util.concurrent.ThreadPoolExecutor$Worker@f1b5be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3676 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3678 (LeaseRenewer:jenkins@localhost:37411): State: TIMED_WAITING Blocked count: 6 Waited count: 210 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3683 (HMaster-EventLoopGroup-25-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3684 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3685 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bffda6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3686 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e6696af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3688 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d2232ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3689 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61f34110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3690 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.Semaphore$NonfairSync@348ac3f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3691 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e13e821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3692 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3693 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3694 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@795b2be5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3695 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1b2cea74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3696 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a1f5bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3697 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2095fd85 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3700 (MiniHBaseClusterRegionServer-EventLoopGroup-26-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3698 (M:0;5ed4808ef0e6:34653): State: TIMED_WAITING Blocked count: 87 Waited count: 253 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$559/0x00007f205ca8e370.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) Thread 3734 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 1952 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3743 
(MiniHBaseClusterRegionServer-EventLoopGroup-26-2): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.CompletableFuture$Signaller@7cf3dec4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.fetchPeerAddresses(HBaseReplicationEndpoint.java:203) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.chooseSinks(HBaseReplicationEndpoint.java:211) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.reportBadSink(HBaseReplicationEndpoint.java:257) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.onReplicateWALEntryException(HBaseInterClusterReplicationEndpoint.java:558) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.lambda$replicateEntries$2(HBaseInterClusterReplicationEndpoint.java:541) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint$$Lambda$1181/0x00007f205cd0d2d0.accept(Unknown Source) app//org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) app//org.apache.hadoop.hbase.util.FutureUtils$$Lambda$432/0x00007f205c9c66f0.accept(Unknown Source) java.base@17.0.11/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) java.base@17.0.11/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) java.base@17.0.11/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) java.base@17.0.11/java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) Thread 3746 (AsyncFSWAL-0-hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData-prefix:5ed4808ef0e6,34653,1732148450571): State: WAITING Blocked count: 0 Waited count: 222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc8576d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3760 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3778 (HMaster-EventLoopGroup-25-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3779 (RegionServerTracker-0): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74ad5a4d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3787 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9804ef Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3799 (MiniHBaseClusterRegionServer-EventLoopGroup-26-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3806 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12554d2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3808 (HMaster-EventLoopGroup-25-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3809 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3810 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51c15b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3811 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fa00c5e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3824 (RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 18 Waited count: 23 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23f3d639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3843 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3844 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3859 (RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f387c04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3862 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.rep): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32c721c4 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3866 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 4 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@170f29bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3868 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3869 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: TIMED_WAITING Blocked count: 220 Waited count: 366 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) Thread 3870 
(RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.CompletableFuture$Signaller@31911a48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) Thread 3964 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4191 (RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76e4d6fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4202 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 
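Note on the thread dump above: it is the periodic per-thread summary the test harness prints every 60 seconds while waiting for the mini-cluster master M:0;5ed4808ef0e6:34653 to shut down (see the "Process Thread Dump: Automatic Stack Trace every 60 seconds" header further below). Most of the listed threads are idle workers parked in LinkedBlockingQueue.take or Semaphore.acquire waiting for tasks or RPC calls; the master thread itself is TIMED_WAITING in SyncFuture.get, blocked on a WAL sync issued while closing a region. A summary with the same shape (name, state, blocked/waited counts, waited-on lock, stack) can be captured through the standard java.lang.management API; the sketch below is purely illustrative, uses only JDK classes, and is not the Hadoop/HBase printThreadInfo code that produced this log (the class name is hypothetical).

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Illustrative sketch only: prints a per-thread summary shaped like the dump above
// (name, state, blocked/waited counts, waited-on lock, stack). Not the HBase/Hadoop
// ReflectionUtils.printThreadInfo implementation.
public class ThreadDumpSketch {
    public static void dump() {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        // dumpAllThreads(false, false) returns full stack traces without
        // locked-monitor/synchronizer details, which this summary does not need.
        for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
            System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
            System.out.printf("  State: %s%n", info.getThreadState());
            System.out.printf("  Blocked count: %d%n", info.getBlockedCount());
            System.out.printf("  Waited count: %d%n", info.getWaitedCount());
            if (info.getLockName() != null) {
                // e.g. "java.util.concurrent.Semaphore$NonfairSync@6d2232ce" for a parked RPC handler
                System.out.printf("  Waiting on %s%n", info.getLockName());
            }
            System.out.println("  Stack:");
            for (StackTraceElement frame : info.getStackTrace()) {
                System.out.println("    " + frame);
            }
        }
    }

    public static void main(String[] args) {
        dump();
    }
}

Read with that shape in mind (or via a jstack of the forked JVM), the entries worth attention are the few threads not parked in take()/acquire(): here, the master thread stuck in the WAL sync and the replication shipper waiting on a CompletableFuture inside HBaseInterClusterReplicationEndpoint.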
2024-11-21T00:24:09,778 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:24:14,217 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:24:15,883 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:24:20,140 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:24:22,086 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:24:28,391 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:24:34,805 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:24:39,457 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570
2024-11-21T00:24:41,325 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:24:44,218 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:24:47,158 WARN [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 10, running: 0
2024-11-21T00:24:47,929 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:24:50,141 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-21T00:24:54,657 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
2024-11-21T00:25:01,464 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5ed4808ef0e6:34653
451 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@d1db0eb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 26 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a794780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) 
java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4401 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 45 Waiting on java.util.concurrent.CountDownLatch$Sync@5e05ced7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 938 Waited count: 1771 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) 
java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) app//org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:221) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) Thread 32 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@53047f3c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 34 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1301779b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 44 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 88 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 152 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@20ea4193 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1344 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48c23f15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 44 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@3bb7d7bf): State: TIMED_WAITING Blocked count: 0 Waited count: 144 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 4 Waited count: 1 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418d319b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 42833 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (HMaster-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 285 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 4
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f55408b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 301 (region-location-0):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12b3f654
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 303 (RPCClient-NioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 304 (HMaster-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 305 (RPCClient-NioEventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 242 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 42651
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 306 (RPCClient-NioEventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 307 (Time-limited test-SendThread(127.0.0.1:50128)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 765
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207)
Thread 308 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ec9cda9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 309 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56603a02
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 318 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 86
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 419 (HMaster-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 436 (MiniHBaseClusterRegionServer-EventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 479 (MiniHBaseClusterRegionServer-EventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 496 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53b4795d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 510 (HMaster-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 519 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e383429
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 535 (MiniHBaseClusterRegionServer-EventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 542 (region-location-0):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e2c1ec2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 544 (RPCClient-NioEventLoopGroup-4-4):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 545 (HMaster-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 546 (RPCClient-NioEventLoopGroup-4-5):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 547 (RPCClient-NioEventLoopGroup-4-6):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 548 (Time-limited test-SendThread(127.0.0.1:50128)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 757
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214)
    app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395)
    app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309)
Thread 549 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39cb992f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 550 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49c561da
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 552 (RPCClient-NioEventLoopGroup-4-7):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 553 (RPCClient-NioEventLoopGroup-4-8):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 554 (RPCClient-NioEventLoopGroup-4-9):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 586 (RPCClient-NioEventLoopGroup-4-10):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 587 (RPCClient-NioEventLoopGroup-4-11):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 588 (RPCClient-NioEventLoopGroup-4-12):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 599 (RPCClient-NioEventLoopGroup-4-13):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 678 (HMaster-EventLoopGroup-7-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 695 (MiniHBaseClusterRegionServer-EventLoopGroup-8-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 724 (MiniHBaseClusterRegionServer-EventLoopGroup-8-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 744 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56c39db7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 754 (HMaster-EventLoopGroup-7-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 763 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90)
Thread 769 (MiniHBaseClusterRegionServer-EventLoopGroup-8-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 825 (region-location-0):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 826 (region-location-1):
  State: WAITING
  Blocked count: 3
  Waited count: 4
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 828 (RPCClient-NioEventLoopGroup-4-14):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 829 (HMaster-EventLoopGroup-7-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 830 (RPCClient-NioEventLoopGroup-4-15):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 831 (RPCClient-NioEventLoopGroup-4-16):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 967 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 81
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 1070 (HMaster-EventLoopGroup-9-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1102 (MiniHBaseClusterRegionServer-EventLoopGroup-10-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1144 (MiniHBaseClusterRegionServer-EventLoopGroup-10-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1161 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d70bb7f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1175 (HMaster-EventLoopGroup-9-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1184 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 1
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f59c6ba
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 1200 (MiniHBaseClusterRegionServer-EventLoopGroup-10-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1207 (region-location-0):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@153dd6c3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1209 (HMaster-EventLoopGroup-9-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 1211 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d5159c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1212 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fc6e35a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1222 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1324 (HMaster-EventLoopGroup-11-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1341 (MiniHBaseClusterRegionServer-EventLoopGroup-12-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1383 (MiniHBaseClusterRegionServer-EventLoopGroup-12-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1400 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ebb900e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1416 (HMaster-EventLoopGroup-11-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1427 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@254bded4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1439 (MiniHBaseClusterRegionServer-EventLoopGroup-12-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1446 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a6d5614 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1448 (HMaster-EventLoopGroup-11-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1449 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 713 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 1450 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9b630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1451 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited 
count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706d87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1617 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1722 (HMaster-EventLoopGroup-13-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1754 (MiniHBaseClusterRegionServer-EventLoopGroup-14-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1796 (MiniHBaseClusterRegionServer-EventLoopGroup-14-2): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1813 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bb6be23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1827 (HMaster-EventLoopGroup-13-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1836 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b7b8ab7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1852 (MiniHBaseClusterRegionServer-EventLoopGroup-14-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1859 (region-location-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1861 (HMaster-EventLoopGroup-13-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1862 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 536 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 1863 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27538485 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1864 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@695a71ea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1873 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1975 (HMaster-EventLoopGroup-15-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1992 (MiniHBaseClusterRegionServer-EventLoopGroup-16-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2034 (MiniHBaseClusterRegionServer-EventLoopGroup-16-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2051 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e437044 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2062 (HMaster-EventLoopGroup-15-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2074 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c03c07b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2091 (MiniHBaseClusterRegionServer-EventLoopGroup-16-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2098 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@274f18d2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2100 (HMaster-EventLoopGroup-15-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2101 (Time-limited test-SendThread(127.0.0.1:58140)): State: TIMED_WAITING Blocked count: 0 Waited count: 540 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 2102 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1da0e8f0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2103 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58a47a50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2136 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2137 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2293 (region-location-1): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2400 (Abort regionserver monitor): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2482 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2587 (HMaster-EventLoopGroup-17-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2619 (MiniHBaseClusterRegionServer-EventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2661 (MiniHBaseClusterRegionServer-EventLoopGroup-18-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2678 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64006344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2692 (HMaster-EventLoopGroup-17-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2701 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8641a2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2718 (MiniHBaseClusterRegionServer-EventLoopGroup-18-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2725 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be91210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2727 (HMaster-EventLoopGroup-17-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2728 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 488 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 2729 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b746a76 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2730 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c093c1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2739 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 2841 (HMaster-EventLoopGroup-19-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2858 (MiniHBaseClusterRegionServer-EventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2900 (MiniHBaseClusterRegionServer-EventLoopGroup-20-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2917 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@233de56d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2931 (HMaster-EventLoopGroup-19-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2940 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871a88 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 2957 (MiniHBaseClusterRegionServer-EventLoopGroup-20-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2964 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3878eb34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2966 (HMaster-EventLoopGroup-19-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2967 (Time-limited test-SendThread(127.0.0.1:62031)): State: TIMED_WAITING Blocked count: 0 Waited count: 502 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Thread 2968 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dc4d1e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 2969 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219e57b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3065 (HMaster-EventLoopGroup-21-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3082 (MiniHBaseClusterRegionServer-EventLoopGroup-22-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3112 (MiniHBaseClusterRegionServer-EventLoopGroup-22-2): State: RUNNABLE Blocked count: 3 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3132 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79954a59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3142 (HMaster-EventLoopGroup-21-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3151 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 3157 (MiniHBaseClusterRegionServer-EventLoopGroup-22-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3193 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3194 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3196 (HMaster-EventLoopGroup-21-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3311 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3312 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3313 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 3310 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 3314 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 3315 (SyncThread:0): State: WAITING Blocked count: 0 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d4bb5fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 3316 (ProcessThread(sid:0 cport:49683):): State: WAITING Blocked count: 0 Waited count: 357 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3b087a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 3317 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 364 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4daf4e66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 3318 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 49 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3326 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f770010): State: TIMED_WAITING Blocked count: 1 Waited count: 531 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3327 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3328 (pool-1100-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (qtp1377509192-3329): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3330 (qtp1377509192-3330): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3331 (qtp1377509192-3331): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3332 (qtp1377509192-3332): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3333 (qtp1377509192-3333-acceptor-0@5c3eb09e-ServerConnector@78a383c6{HTTP/1.1, (http/1.1)}{localhost:41239}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3334 (qtp1377509192-3334): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3335 (qtp1377509192-3335): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3336 (qtp1377509192-3336): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3337 (Session-HouseKeeper-2ecd32c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3338 (pool-1101-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3345 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46bc032c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3347 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3348 (IPC Server idle connection scanner for port 34141): State: TIMED_WAITING Blocked count: 1 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3350 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3353 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@687e5a89): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3354 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3341 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7d8449d8): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3339 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 89 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3340 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 25867 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3343 (Block report processor): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47a7a624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3349 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3346 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3356 (IPC Server handler 0 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 363 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3357 (IPC Server handler 1 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 365 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3358 (IPC Server handler 2 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3359 (IPC Server handler 3 on default port 34141): State: TIMED_WAITING Blocked count: 5 Waited count: 364 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3360 (IPC Server handler 4 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 362 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3361 (pool-1106-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3363 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6f4c8732): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3364 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@3dd894f0): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3365 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@a75ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3366 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@24b24059): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3367 (CacheReplicationMonitor(677237174)): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3377 (pool-1112-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3378 (qtp1577786156-3378): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3379 (qtp1577786156-3379-acceptor-0@d163538-ServerConnector@4c8a0263{HTTP/1.1, (http/1.1)}{localhost:40451}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3380 (qtp1577786156-3380): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3381 (qtp1577786156-3381): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3382 (Session-HouseKeeper-327d7abf-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (nioEventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3384 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5aa0588f): State: TIMED_WAITING Blocked count: 2 Waited count: 530 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3386 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3387 (IPC Server idle connection scanner for port 39975): State: TIMED_WAITING Blocked count: 1 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3389 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3392 (Command processor): State: WAITING Blocked count: 0 Waited count: 91 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3153d1dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3393 (BP-913626249-172.17.0.2-1732148442480 heartbeating to localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 135 Waited count: 226 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3394 (pool-1114-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3395 (IPC Client (1040632728) connection to 
localhost/127.0.0.1:34141 from jenkins): State: TIMED_WAITING Blocked count: 110 Waited count: 111 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3376 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@86acec8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3396 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3388 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3385 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3397 (IPC Server handler 0 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3398 (IPC Server handler 1 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3399 (IPC Server handler 2 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3400 (IPC Server handler 3 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3401 (IPC Server handler 4 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 269 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3406 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3407 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3412 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3413 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3417 (pool-1109-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3420 (java.util.concurrent.ThreadPoolExecutor$Worker@6d2378c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3421 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3423 (LeaseRenewer:jenkins@localhost:34141): State: TIMED_WAITING Blocked count: 8 Waited count: 280 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3429 (HMaster-EventLoopGroup-23-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3430 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3431 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c78661c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3432 (NIOWorkerThread-2): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3434 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3435 (zk-event-processor-pool-0): State: WAITING Blocked count: 13 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a467ae1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3436 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3437 (NIOWorkerThread-5): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3438 (NIOWorkerThread-6): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3439 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3440 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3441 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3442 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3443 (NIOWorkerThread-11): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3444 (NIOWorkerThread-12): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3445 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3446 (NIOWorkerThread-14): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3447 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3448 (NIOWorkerThread-16): State: WAITING Blocked count: 0 Waited count: 49 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3450 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f5fd45d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3451 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@43e2a383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3452 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 50 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c257565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3453 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 2551 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255d9f72 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3454 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3455 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3456 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12690708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3457 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7580c67d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3458 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c6f71b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3459 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35861): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@7235f80f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3462 (MiniHBaseClusterRegionServer-EventLoopGroup-24-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (Time-limited test-SendThread(127.0.0.1:49683)): State: 
RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3465 (Time-limited test-EventThread): State: WAITING Blocked count: 1 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28bef006 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3466 (zk-event-processor-pool-0): State: WAITING Blocked count: 14 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bfe6842 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3467 (Time-limited test.LruBlockCache.EvictionThread): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Thread 3469 (LruBlockCacheStatsExecutor): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3471 (MobFileCache #0): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3472 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@36063768 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3473 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 30 Waited count: 37 Waiting on java.util.concurrent.Semaphore$NonfairSync@71197954 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3474 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 40 Waited count: 48 Waiting on java.util.concurrent.Semaphore$NonfairSync@2800e149 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3475 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219f8229 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3476 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33745): State: WAITING Blocked count: 4 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3477 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33745): State: WAITING Blocked count: 2 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3478 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ccc525c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3479 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3369e225 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3480 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745): State: WAITING Blocked count: 14 Waited count: 30 Waiting on java.util.concurrent.Semaphore$NonfairSync@6ebaa0ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3481 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33745): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@62f39c0d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3460 (M:0;5ed4808ef0e6:35861): State: TIMED_WAITING Blocked 
count: 1 Waited count: 2622 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:625) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3495 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 2611 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3504 (MiniHBaseClusterRegionServer-EventLoopGroup-24-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3505 (DataXceiver for client DFSClient_NONMAPREDUCE_742485806_20 at /127.0.0.1:51882 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3506 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 109 
Waited count: 110 Waiting on java.util.ArrayDeque@5b538681 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3507 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData-prefix:5ed4808ef0e6,35861,1732148444638): State: WAITING Blocked count: 0 Waited count: 237 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25be6a12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3510 (master:store-Flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor.flushLoop(MasterRegionFlusherAndCompactor.java:200) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor$$Lambda$479/0x00007f205c9f9bd8.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3518 (ProcedureDispatcherTimeoutThread): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread.run(RemoteProcedureDispatcher.java:328) Thread 3519 (5ed4808ef0e6:35861): State: WAITING Blocked count: 0 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b481a9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.waitOnAssignQueue(AssignmentManager.java:2390) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.processAssignQueue(AssignmentManager.java:2412) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager$1.run(AssignmentManager.java:2352) Thread 3520 (normalizer-worker-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6df0091b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue.take(RegionNormalizerWorkQueue.java:146) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker.run(RegionNormalizerWorker.java:191) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3521 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3511 (ProcExecTimeout): State: TIMED_WAITING Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3512 (WorkerMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Thread 3513 (PEWorker-1): State: TIMED_WAITING Blocked count: 46 Waited count: 103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3514 (PEWorker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3515 (PEWorker-3): State: TIMED_WAITING Blocked count: 23 Waited count: 60 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3516 (PEWorker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) 
app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3517 (PEWorker-5): State: TIMED_WAITING Blocked count: 1 Waited count: 31 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3525 (OldWALsCleaner-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b2e289 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.deleteFile(LogCleaner.java:172) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.lambda$createOldWalsCleaner$1(LogCleaner.java:152) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner$$Lambda$598/0x00007f205cacf298.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3526 (master/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3528 (snapshot-hfile-cleaner-cache-refresher): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3529 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148447068): State: WAITING Blocked count: 0 Waited 
count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cebe51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$1.run(HFileCleaner.java:254) Thread 3532 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58e9f6a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$2.run(HFileCleaner.java:269) Thread 3482 (RS:0;5ed4808ef0e6:33745): State: TIMED_WAITING Blocked count: 2569 Waited count: 5115 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:906) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) java.base@17.0.11/java.security.AccessController.executePrivileged(AccessController.java:776) java.base@17.0.11/java.security.AccessController.doPrivileged(AccessController.java:399) java.base@17.0.11/javax.security.auth.Subject.doAs(Subject.java:376) app//org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) app//org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3533 (BootstrapNodeManager): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3535 (HMaster-EventLoopGroup-23-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3536 (RegionServerTracker-0): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6aa13e91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3537 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins.hfs.10): State: TIMED_WAITING Blocked count: 356 Waited count: 357 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3538 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 354 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3539 (JvmPauseMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 522 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:148) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3540 (RS:0;5ed4808ef0e6:33745-longCompactions-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a8b6720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3541 (regionserver/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 454 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3543 (regionserver/5ed4808ef0e6:0.logRoller): State: TIMED_WAITING Blocked count: 0 Waited count: 2600 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3545 (MemStoreFlusher.0): State: TIMED_WAITING Blocked count: 0 Waited count: 2600 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:77) app//org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:323) Thread 3544 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@237b99c1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3542 (regionserver/5ed4808ef0e6:0.leaseChecker): State: TIMED_WAITING Blocked count: 0 Waited count: 2601 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Thread 3548 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51900 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3549 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.util.ArrayDeque@566951c3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3550 (LeaseRenewer:jenkins.hfs.10@localhost:34141): State: TIMED_WAITING Blocked count: 8 Waited count: 269 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3551 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73c7bfc9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3560 (MiniHBaseClusterRegionServer-EventLoopGroup-24-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3561 
(RS_OPEN_META-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33092bee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3562 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51908 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3563 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 22 Waited count: 23 Waiting on java.util.ArrayDeque@4cc309df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3564 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.meta): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5038aecd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3567 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ecf6af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3568 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3569 (HMaster-EventLoopGroup-23-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3570 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3571 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@303a142a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3572 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e18a26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3580 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2db0d51d): State: TIMED_WAITING Blocked count: 5 Waited count: 519 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3581 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3582 (pool-1206-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3583 (qtp1341535794-3583): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3584 (qtp1341535794-3584): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3585 (qtp1341535794-3585): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3586 (qtp1341535794-3586): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3587 (qtp1341535794-3587-acceptor-0@3dd8f85e-ServerConnector@5cf1b482{HTTP/1.1, (http/1.1)}{localhost:33319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3588 (qtp1341535794-3588): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3589 (qtp1341535794-3589): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3590 (qtp1341535794-3590): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3591 (Session-HouseKeeper-1a5d7d7b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3592 (pool-1207-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3599 (FSEditLogAsync): State: WAITING Blocked count: 1 Waited count: 200 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@501965e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3601 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3602 (IPC Server idle connection scanner for port 37411): State: TIMED_WAITING Blocked count: 
1 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3604 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3607 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@16dcedcf): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3608 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3595 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2cab800b): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3593 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 87 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3594 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 
Waited count: 25310 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3597 (Block report processor): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3441d181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3603 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3600 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3610 (IPC Server handler 0 on default port 37411): State: TIMED_WAITING Blocked count: 4 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3611 (IPC Server handler 1 on default port 37411): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3612 (IPC Server handler 2 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 387 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3613 (IPC Server handler 3 on default port 37411): State: TIMED_WAITING Blocked count: 2 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3614 (IPC Server handler 4 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 390 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3615 (pool-1212-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3617 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@45bccaf9): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3618 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@4915eba0): State: TIMED_WAITING Blocked count: 0 Waited 
count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3619 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@75ca663c): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3620 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@1d86b1c2): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3621 (CacheReplicationMonitor(449845788)): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3631 (pool-1218-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3632 (qtp1777667352-3632): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3633 (qtp1777667352-3633-acceptor-0@34a75aa7-ServerConnector@5b981b4e{HTTP/1.1, (http/1.1)}{localhost:43351}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3634 (qtp1777667352-3634): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3635 (qtp1777667352-3635): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3636 (Session-HouseKeeper-67fcf623-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3637 (nioEventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3638 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@64c83422): State: TIMED_WAITING Blocked count: 7 Waited count: 518 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3640 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3641 (IPC Server idle connection scanner for port 41981): State: TIMED_WAITING Blocked count: 1 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3643 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3646 (Command processor): State: WAITING Blocked count: 0 Waited count: 89 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e00196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3647 (BP-493474764-172.17.0.2-1732148448119 heartbeating to localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 157 Waited count: 243 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3648 (pool-1220-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3630 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@68bc995a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3642 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3639 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3651 (IPC Server handler 0 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3652 (IPC Server handler 1 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3653 (IPC Server handler 2 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3654 (IPC Server handler 3 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3655 (IPC Server handler 4 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 263 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3649 (IPC Client (1040632728) connection to localhost/127.0.0.1:37411 from jenkins): State: TIMED_WAITING Blocked count: 119 Waited count: 120 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3650 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 120 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3660 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3661 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3665 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3667 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3671 (pool-1215-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3675 (java.util.concurrent.ThreadPoolExecutor$Worker@f1b5be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3676 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3678 (LeaseRenewer:jenkins@localhost:37411): State: TIMED_WAITING Blocked count: 8 Waited count: 274 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3683 (HMaster-EventLoopGroup-25-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3684 (Time-limited 
test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3685 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bffda6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3686 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e6696af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3688 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d2232ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3689 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61f34110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3690 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.Semaphore$NonfairSync@348ac3f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3691 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e13e821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3692 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3693 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3694 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@795b2be5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3695 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1b2cea74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3696 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a1f5bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3697 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2095fd85 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3700 (MiniHBaseClusterRegionServer-EventLoopGroup-26-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3698 (M:0;5ed4808ef0e6:34653): State: TIMED_WAITING Blocked count: 87 Waited count: 253 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$559/0x00007f205ca8e370.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) Thread 3734 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 2554 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3743 (MiniHBaseClusterRegionServer-EventLoopGroup-26-2): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.CompletableFuture$Signaller@7cf3dec4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.fetchPeerAddresses(HBaseReplicationEndpoint.java:203) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.chooseSinks(HBaseReplicationEndpoint.java:211) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.reportBadSink(HBaseReplicationEndpoint.java:257) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.onReplicateWALEntryException(HBaseInterClusterReplicationEndpoint.java:558) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.lambda$replicateEntries$2(HBaseInterClusterReplicationEndpoint.java:541) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint$$Lambda$1181/0x00007f205cd0d2d0.accept(Unknown Source) app//org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) app//org.apache.hadoop.hbase.util.FutureUtils$$Lambda$432/0x00007f205c9c66f0.accept(Unknown Source) java.base@17.0.11/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) java.base@17.0.11/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) java.base@17.0.11/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) java.base@17.0.11/java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) Thread 3746 (AsyncFSWAL-0-hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData-prefix:5ed4808ef0e6,34653,1732148450571): State: WAITING Blocked count: 0 Waited count: 222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc8576d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3760 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3778 (HMaster-EventLoopGroup-25-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3779 (RegionServerTracker-0): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74ad5a4d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3787 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9804ef Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3799 (MiniHBaseClusterRegionServer-EventLoopGroup-26-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3806 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12554d2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3808 (HMaster-EventLoopGroup-25-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3809 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3810 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51c15b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3811 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fa00c5e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3824 (RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 18 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23f3d639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3843 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3844 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3859 (RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f387c04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3862 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.rep): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32c721c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3866 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 4 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@170f29bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3868 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3869 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: TIMED_WAITING Blocked count: 247 Waited count: 411 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) Thread 3870 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.CompletableFuture$Signaller@31911a48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) Thread 3964 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4191 (RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76e4d6fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4202 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 4254 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:25:08,389 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:25:14,220 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:25:15,413 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:25:20,141 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
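The per-thread stack listings that end just above (and the further "Process Thread Dump" that follows later in this log) are produced through the standard java.lang.management API: the dump itself shows sun.management.ThreadImpl.getThreadInfo being called from org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo. A minimal sketch of capturing an equivalent dump with ThreadMXBean is shown below; the helper class name PeriodicThreadDumper and the 60-second interval are illustrative assumptions, not HBase's actual implementation.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    // Hypothetical helper, not HBase code: prints entries shaped like the
    // "Thread N (name): State: ... Blocked count: ... Waited count: ..." lines in this log.
    public class PeriodicThreadDumper {
      public static void main(String[] args) throws InterruptedException {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        while (true) {
          for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
            System.out.printf("Thread %d (%s): State: %s Blocked count: %d Waited count: %d%n",
                info.getThreadId(), info.getThreadName(), info.getThreadState(),
                info.getBlockedCount(), info.getWaitedCount());
            for (StackTraceElement frame : info.getStackTrace()) {
              System.out.println("  " + frame);
            }
          }
          Thread.sleep(60_000L); // 60-second cadence, matching the dump header in this log
        }
      }
    }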
2024-11-21T00:25:22,540 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:25:24,458 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:25:29,744 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:25:37,063 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:25:44,220 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
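The FsDatasetAsyncDiskServiceFixer DEBUG record just above reports a NoSuchFieldException for a field named threadGroup, i.e. a reflective field lookup that stops working once the field is removed in newer Hadoop releases (see HBASE-27595). A hedged sketch of that general pattern follows; the helper ReflectiveFieldProbe is illustrative only and is not the actual HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer code.

    import java.lang.reflect.Field;

    // Illustrative pattern only; not the actual HBaseTestingUtil code.
    public final class ReflectiveFieldProbe {
      static Object readPrivateField(Object target, String fieldName) {
        try {
          Field f = target.getClass().getDeclaredField(fieldName);
          f.setAccessible(true);
          return f.get(target);
        } catch (NoSuchFieldException e) {
          // The field was removed or renamed in a newer version of the dependency,
          // which is what the "NoSuchFieldException: threadGroup" messages report.
          System.err.println("NoSuchFieldException: " + fieldName);
          return null;
        } catch (IllegalAccessException e) {
          throw new IllegalStateException(e);
        }
      }
    }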
2024-11-21T00:25:44,469 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:25:44,999 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=682.28 KB, freeSize=879.33 MB, max=880 MB, blockCount=4, accesses=6, hits=2, hitRatio=33.33%, , cachingAccesses=6, cachingHits=2, cachingHitsRatio=33.33%, evictions=29, evicted=0, evictedPerRun=0.0 2024-11-21T00:25:47,220 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=447B, Limit=268435456B Normal source for cluster 1: Total replicated edits: 1, current progress: walGroup [5ed4808ef0e6%2C33745%2C1732148444978]: currently replicating from: hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 at position: 687 2024-11-21T00:25:47,221 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] replication.ReplicationSinkServiceImpl$ReplicationStatisticsChore(116): Sink: age in ms of last applied edit: 0, total replicated edits: 2 2024-11-21T00:25:48,001 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:25:48,002 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-24-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48203, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:25:48,005 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:25:48,007 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-21T00:25:48,007 INFO [master/5ed4808ef0e6:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-21T00:25:48,007 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
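For reference, the LruBlockCacheStatsExecutor record above derives its figures from plain ratios: hitRatio is hits/accesses (2/6 ≈ 33.33%) and freeSize is max minus usedSize (880 MB − 682.28 KB ≈ 879.33 MB). A tiny sketch of that arithmetic, using the numbers taken from the log line:

    public class CacheStatsCheck {
      public static void main(String[] args) {
        long accesses = 6, hits = 2;              // from the LruBlockCache stats line
        double maxMb = 880.0;                     // max=880 MB
        double usedMb = 682.28 / 1024.0;          // usedSize=682.28 KB expressed in MB
        System.out.printf("hitRatio=%.2f%%%n", 100.0 * hits / accesses);  // 33.33%
        System.out.printf("freeSize=%.2f MB%n", maxMb - usedMb);          // ~879.33 MB
      }
    }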
2024-11-21T00:25:48,010 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {5ed4808ef0e6=0} racks are {/default-rack=0} 2024-11-21T00:25:48,020 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 3 regions 2024-11-21T00:25:48,020 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-21T00:25:48,020 INFO [master/5ed4808ef0e6:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-21T00:25:48,020 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=3, number of hosts=1, number of racks=1 2024-11-21T00:25:48,023 INFO [master/5ed4808ef0e6:0.Chore.1 {}] balancer.StochasticLoadBalancer(357): Not running balancer because only 1 active regionserver(s) 2024-11-21T00:25:48,023 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-21T00:25:48,028 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table test because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-21T00:25:48,224 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 227011 ms 2024-11-21T00:25:50,141 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:25:51,973 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:25:59,156 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:replication,,1732148457012.ee3431ada32b00d5fdbcd917d1fe42f7. 
because ee3431ada32b00d5fdbcd917d1fe42f7/queue has an old edit so flush to free WALs after random delay 50794 ms 2024-11-21T00:25:59,581 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:26:07,288 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;5ed4808ef0e6:34653 451 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@d1db0eb Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE 
Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 27 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@a794780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5002 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 
(surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 51 Waiting on java.util.concurrent.CountDownLatch$Sync@5ad9da8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 20 (Time-limited test): State: RUNNABLE Blocked count: 938 Waited count: 1772 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) app//org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileCyclicReplication(TestMasterReplication.java:221) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) 
app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) Thread 32 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@53047f3c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 34 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@1301779b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 44 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 152 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@20ea4193 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 186 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48c23f15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 212 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (org.apache.hadoop.hdfs.PeerCache@3bb7d7bf): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 4 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 244 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@418d319b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 243 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 48853 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 285 
(regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 4 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f55408b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 301 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12b3f654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 303 (RPCClient-NioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 304 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 305 (RPCClient-NioEventLoopGroup-4-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 242 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 48670 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 306 
(RPCClient-NioEventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 307 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 875 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204) Thread 308 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ec9cda9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 309 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56603a02 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 318 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 98 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 419 (HMaster-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 436 (MiniHBaseClusterRegionServer-EventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 479 (MiniHBaseClusterRegionServer-EventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 496 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@53b4795d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (HMaster-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 519 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e383429 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 535 (MiniHBaseClusterRegionServer-EventLoopGroup-6-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e2c1ec2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (RPCClient-NioEventLoopGroup-4-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (HMaster-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-4-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 547 (RPCClient-NioEventLoopGroup-4-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (Time-limited test-SendThread(127.0.0.1:50128)): State: TIMED_WAITING Blocked count: 0 Waited count: 873 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 549 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39cb992f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 550 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49c561da Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 552 (RPCClient-NioEventLoopGroup-4-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (RPCClient-NioEventLoopGroup-4-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 554 (RPCClient-NioEventLoopGroup-4-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 586 (RPCClient-NioEventLoopGroup-4-10): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (RPCClient-NioEventLoopGroup-4-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (RPCClient-NioEventLoopGroup-4-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 599 (RPCClient-NioEventLoopGroup-4-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 678 (HMaster-EventLoopGroup-7-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 695 (MiniHBaseClusterRegionServer-EventLoopGroup-8-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 724 (MiniHBaseClusterRegionServer-EventLoopGroup-8-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 744 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56c39db7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 754 (HMaster-EventLoopGroup-7-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 763 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 769 (MiniHBaseClusterRegionServer-EventLoopGroup-8-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 825 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 826 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@589743a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 828 (RPCClient-NioEventLoopGroup-4-14): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 829 (HMaster-EventLoopGroup-7-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 830 (RPCClient-NioEventLoopGroup-4-15): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 831 (RPCClient-NioEventLoopGroup-4-16): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 967 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1070 (HMaster-EventLoopGroup-9-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1102 (MiniHBaseClusterRegionServer-EventLoopGroup-10-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (MiniHBaseClusterRegionServer-EventLoopGroup-10-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1161 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d70bb7f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1175 (HMaster-EventLoopGroup-9-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1184 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f59c6ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1200 (MiniHBaseClusterRegionServer-EventLoopGroup-10-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@153dd6c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (HMaster-EventLoopGroup-9-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (Time-limited test-SendThread(127.0.0.1:63439)): State: TIMED_WAITING Blocked count: 0 Waited count: 822 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207) Thread 1211 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@77d5159c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 1212 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fc6e35a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1222 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 1324 (HMaster-EventLoopGroup-11-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1341 (MiniHBaseClusterRegionServer-EventLoopGroup-12-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1383 (MiniHBaseClusterRegionServer-EventLoopGroup-12-2): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1400 (SnapshotHandlerChoreCleaner): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ebb900e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1416 (HMaster-EventLoopGroup-11-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1427 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@254bded4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 1439 (MiniHBaseClusterRegionServer-EventLoopGroup-12-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1446 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a6d5614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1448 (HMaster-EventLoopGroup-11-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1449 (Time-limited test-SendThread(127.0.0.1:63439)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 827
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204)
Thread 1450 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9b630
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 1451 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@706d87
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1617 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 89
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 1722 (HMaster-EventLoopGroup-13-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1754 (MiniHBaseClusterRegionServer-EventLoopGroup-14-1):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1796 (MiniHBaseClusterRegionServer-EventLoopGroup-14-2):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1813 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 10
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7bb6be23
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1827 (HMaster-EventLoopGroup-13-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1836 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 1
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b7b8ab7
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 1852 (MiniHBaseClusterRegionServer-EventLoopGroup-14-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1859 (region-location-0):
  State: WAITING
  Blocked count: 8
  Waited count: 10
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1861 (HMaster-EventLoopGroup-13-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1862 (Time-limited test-SendThread(127.0.0.1:58140)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 651
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207)
Thread 1863 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27538485
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 1864 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@695a71ea
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1873 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 88
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 1975 (HMaster-EventLoopGroup-15-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1992 (MiniHBaseClusterRegionServer-EventLoopGroup-16-1):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2034 (MiniHBaseClusterRegionServer-EventLoopGroup-16-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2051 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e437044
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2062 (HMaster-EventLoopGroup-15-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2074 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c03c07b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 2091 (MiniHBaseClusterRegionServer-EventLoopGroup-16-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2098 (region-location-0):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@274f18d2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2100 (HMaster-EventLoopGroup-15-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2101 (Time-limited test-SendThread(127.0.0.1:58140)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 651
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.ClientCnxn$SendThread.startConnect(ClientCnxn.java:1144)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1207)
Thread 2102 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1da0e8f0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 2103 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58a47a50
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2136 (Registry-endpoints-refresh-end-points):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92)
    app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2137 (Registry-endpoints-refresh-end-points):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92)
    app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2293 (region-location-1):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@244cd9de
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2400 (Abort regionserver monitor):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 2482 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 71
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 2587 (HMaster-EventLoopGroup-17-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2619 (MiniHBaseClusterRegionServer-EventLoopGroup-18-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2661 (MiniHBaseClusterRegionServer-EventLoopGroup-18-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2678 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64006344
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2692 (HMaster-EventLoopGroup-17-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2701 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 1
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d8641a2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 2718 (MiniHBaseClusterRegionServer-EventLoopGroup-18-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2725 (region-location-0):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be91210
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2727 (HMaster-EventLoopGroup-17-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2728 (Time-limited test-SendThread(127.0.0.1:62031)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 599
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204)
Thread 2729 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7b746a76
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 2730 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1c093c1f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2739 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 70
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 2841 (HMaster-EventLoopGroup-19-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2858 (MiniHBaseClusterRegionServer-EventLoopGroup-20-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2900 (MiniHBaseClusterRegionServer-EventLoopGroup-20-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2917 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@233de56d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2931 (HMaster-EventLoopGroup-19-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2940 (regionserver/5ed4808ef0e6:0.procedureResultReporter):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@60871a88
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 2957 (MiniHBaseClusterRegionServer-EventLoopGroup-20-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2964 (region-location-0):
  State: WAITING
  Blocked count: 4
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3878eb34
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2966 (HMaster-EventLoopGroup-19-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2967 (Time-limited test-SendThread(127.0.0.1:62031)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 617
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.client.StaticHostProvider.next(StaticHostProvider.java:362)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1204)
Thread 2968 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6dc4d1e0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 2969 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219e57b8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3065 (HMaster-EventLoopGroup-21-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3082 (MiniHBaseClusterRegionServer-EventLoopGroup-22-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3112 (MiniHBaseClusterRegionServer-EventLoopGroup-22-2):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3132 (SnapshotHandlerChoreCleaner):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79954a59
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3142 (HMaster-EventLoopGroup-21-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3151 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: app//org.apache.hadoop.hbase.regionserver.HRegionServer.reportProcedureDone(HRegionServer.java:3557) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:90) Thread 3157 (MiniHBaseClusterRegionServer-EventLoopGroup-22-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3193 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3194 (region-location-1): State: WAITING Blocked count: 3 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6cb3f14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3196 (HMaster-EventLoopGroup-21-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3311 (NIOServerCxnFactory.SelectorThread-0): State: 
RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3312 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 3313 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49683): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 3310 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 3314 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 164 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 3315 (SyncThread:0): State: WAITING Blocked count: 0 Waited count: 364 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4d4bb5fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 3316 (ProcessThread(sid:0 cport:49683):): State: WAITING Blocked count: 0 Waited count: 379 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e3b087a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 3317 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 386 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4daf4e66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 3318 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 52 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3326 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f770010): State: TIMED_WAITING Blocked count: 1 Waited count: 651 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3327 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3328 (pool-1100-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3329 (qtp1377509192-3329): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3330 (qtp1377509192-3330): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3331 (qtp1377509192-3331): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3332 (qtp1377509192-3332): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3333 (qtp1377509192-3333-acceptor-0@5c3eb09e-ServerConnector@78a383c6{HTTP/1.1, (http/1.1)}{localhost:41239}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3334 (qtp1377509192-3334): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3335 (qtp1377509192-3335): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3336 (qtp1377509192-3336): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3337 (Session-HouseKeeper-2ecd32c0-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3338 
(pool-1101-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3345 (FSEditLogAsync): State: WAITING Blocked count: 0 Waited count: 145 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46bc032c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3347 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3348 (IPC Server idle connection scanner for port 34141): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3350 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3353 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@687e5a89): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3354 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3341 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7d8449d8): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3339 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 109 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3340 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 31733 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3343 (Block report processor): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47a7a624 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3349 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3346 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3356 (IPC Server handler 0 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 426 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3357 (IPC Server handler 1 on default port 34141): State: TIMED_WAITING Blocked count: 3 Waited count: 429 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3358 (IPC Server handler 2 on default port 34141): State: TIMED_WAITING Blocked count: 2 Waited count: 426 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3359 (IPC Server handler 3 on default port 34141): State: TIMED_WAITING Blocked count: 5 Waited count: 427 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3360 (IPC Server handler 4 on default port 34141): State: TIMED_WAITING Blocked count: 6 Waited count: 425 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3361 (pool-1106-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3363 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6f4c8732): State: TIMED_WAITING Blocked count: 0 Waited count: 163 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3364 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@3dd894f0): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3365 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@a75ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3366 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@24b24059): 
State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3367 (CacheReplicationMonitor(677237174)): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3377 (pool-1112-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3378 (qtp1577786156-3378): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3379 (qtp1577786156-3379-acceptor-0@d163538-ServerConnector@4c8a0263{HTTP/1.1, (http/1.1)}{localhost:40451}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3380 (qtp1577786156-3380): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3381 (qtp1577786156-3381): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3382 (Session-HouseKeeper-327d7abf-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3383 (nioEventLoopGroup-18-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3384 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@5aa0588f): State: TIMED_WAITING Blocked count: 2 Waited count: 650 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3386 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3387 (IPC Server idle connection scanner for port 39975): State: TIMED_WAITING Blocked count: 1 Waited count: 34 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3389 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3392 (Command processor): State: WAITING Blocked count: 0 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3153d1dd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3393 (BP-913626249-172.17.0.2-1732148442480 heartbeating to localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 155 Waited count: 266 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3394 (pool-1114-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3395 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins): State: TIMED_WAITING Blocked count: 130 Waited count: 131 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3376 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@86acec8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3396 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 131 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3388 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3385 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3397 (IPC Server handler 0 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3398 (IPC Server handler 1 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3399 (IPC Server handler 2 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 330 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3400 (IPC Server handler 3 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3401 (IPC Server handler 4 on default port 39975): State: TIMED_WAITING Blocked count: 0 Waited count: 329 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3406 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3407 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3412 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3413 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2/current/BP-913626249-172.17.0.2-1732148442480): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3417 (pool-1109-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3420 (java.util.concurrent.ThreadPoolExecutor$Worker@6d2378c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited 
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3421 (FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 11
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576)
Thread 3423 (LeaseRenewer:jenkins@localhost:34141):
  State: TIMED_WAITING
  Blocked count: 10
  Waited count: 344
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3429 (HMaster-EventLoopGroup-23-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3430 (Time-limited test-SendThread(127.0.0.1:49683)):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 3431 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 0
  Waited count: 13
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4c78661c
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 3432 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3434 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3435 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 13
  Waited count: 25
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a467ae1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3436 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3437 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3438 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3439 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3440 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3441 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3442 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3443 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3444 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3445 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3446 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 0
  Waited count: 51
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3447 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 0
  Waited count: 51
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3448 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1243eb58
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3450 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@1f5fd45d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3451 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@43e2a383
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3452 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 52
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4c257565
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3453 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 3139
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@255d9f72
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3454 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3455 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44414ab8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3456 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@12690708
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3457 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@7580c67d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3458 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@3c6f71b0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3459 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35861):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@7235f80f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3462 (MiniHBaseClusterRegionServer-EventLoopGroup-24-1):
  State: RUNNABLE
  Blocked count: 4
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3464 (Time-limited test-SendThread(127.0.0.1:49683)):
  State: RUNNABLE
  Blocked count: 4
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 3465 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 1
  Waited count: 12
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28bef006
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 3466 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 14
  Waited count: 24
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5bfe6842
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3467 (Time-limited test.LruBlockCache.EvictionThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 33
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957)
Thread 3469 (LruBlockCacheStatsExecutor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3471 (MobFileCache #0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3472 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33745):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@36063768
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3473 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745):
  State: WAITING
  Blocked count: 30
  Waited count: 37
  Waiting on java.util.concurrent.Semaphore$NonfairSync@71197954
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3474 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745):
  State: WAITING
  Blocked count: 40
  Waited count: 48
  Waiting on java.util.concurrent.Semaphore$NonfairSync@2800e149
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3475 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33745):
  State: WAITING
  Blocked count: 0
  Waited count: 37
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@219f8229
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3476 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33745):
  State: WAITING
  Blocked count: 4
  Waited count: 16
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3477 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33745):
  State: WAITING
  Blocked count: 2
  Waited count: 14
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26a55c05
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3478 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33745):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@1ccc525c
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3479 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33745):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@3369e225
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3480 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745):
  State: WAITING
  Blocked count: 14
  Waited count: 30
  Waiting on java.util.concurrent.Semaphore$NonfairSync@6ebaa0ab
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3481 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33745):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@62f39c0d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 3460 (M:0;5ed4808ef0e6:35861):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 3220
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81)
    app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64)
    app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:625)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3495 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3209
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180)
Thread 3504 (MiniHBaseClusterRegionServer-EventLoopGroup-24-2):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3505 (DataXceiver for client DFSClient_NONMAPREDUCE_742485806_20 at /127.0.0.1:51882 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006]):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
    app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
Thread 3506 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006, type=LAST_IN_PIPELINE):
  State: WAITING
  Blocked count: 111
  Waited count: 112
  Waiting on java.util.ArrayDeque@5b538681
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3507 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData-prefix:5ed4808ef0e6,35861,1732148444638):
  State: WAITING
  Blocked count: 0
  Waited count: 237
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25be6a12
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3510 (master:store-Flusher):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor.flushLoop(MasterRegionFlusherAndCompactor.java:200)
    app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor$$Lambda$479/0x00007f205c9f9bd8.run(Unknown Source)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3518 (ProcedureDispatcherTimeoutThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 24
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265)
    app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81)
    app//org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread.run(RemoteProcedureDispatcher.java:328)
Thread 3519 (5ed4808ef0e6:35861):
  State: WAITING
  Blocked count: 0
  Waited count: 7
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3b481a9a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.waitOnAssignQueue(AssignmentManager.java:2390)
    app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.processAssignQueue(AssignmentManager.java:2412)
    app//org.apache.hadoop.hbase.master.assignment.AssignmentManager$1.run(AssignmentManager.java:2352)
Thread 3520 (normalizer-worker-0):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6df0091b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue.take(RegionNormalizerWorkQueue.java:146)
    app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker.run(RegionNormalizerWorker.java:191)
    java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3521 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 33
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 3511 (ProcExecTimeout):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 36
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279)
    app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81)
    app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56)
Thread 3512 (WorkerMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 66
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279)
    app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81)
    app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56)
Thread 3513 (PEWorker-1):
  State: TIMED_WAITING
  Blocked count: 46
  Waited count: 104
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167)
    app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149)
app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3514 (PEWorker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3515 (PEWorker-3): State: TIMED_WAITING Blocked count: 23 Waited count: 61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3516 (PEWorker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3517 (PEWorker-5): State: TIMED_WAITING Blocked count: 1 Waited count: 32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Thread 3525 (OldWALsCleaner-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33b2e289 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.deleteFile(LogCleaner.java:172) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.lambda$createOldWalsCleaner$1(LogCleaner.java:152) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner$$Lambda$598/0x00007f205cacf298.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3526 (master/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 11 Waited count: 33 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3528 (snapshot-hfile-cleaner-cache-refresher): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3529 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cebe51d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$1.run(HFileCleaner.java:254) Thread 3532 (master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148447068): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58e9f6a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$2.run(HFileCleaner.java:269) Thread 3482 (RS:0;5ed4808ef0e6:33745): State: WAITING Blocked count: 3155 Waited count: 6290 Waiting on org.apache.hadoop.hbase.ipc.BlockingRpcCallback@679b7b9d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hbase.ipc.BlockingRpcCallback.get(BlockingRpcCallback.java:60) app//org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:339) app//org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) app//org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.regionServerReport(RegionServerStatusProtos.java:17278) app//org.apache.hadoop.hbase.regionserver.HRegionServer.tryRegionServerReport(HRegionServer.java:1079) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:902) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) java.base@17.0.11/java.security.AccessController.executePrivileged(AccessController.java:776) java.base@17.0.11/java.security.AccessController.doPrivileged(AccessController.java:399) java.base@17.0.11/javax.security.auth.Subject.doAs(Subject.java:376) app//org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) app//org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3533 (BootstrapNodeManager): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3535 (HMaster-EventLoopGroup-23-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3536 (RegionServerTracker-0): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6aa13e91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3537 (IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins.hfs.10): State: TIMED_WAITING Blocked count: 376 Waited count: 377 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3538 (IPC Parameter Sending Thread for localhost/127.0.0.1:34141): State: TIMED_WAITING Blocked count: 0 Waited count: 374 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3539 (JvmPauseMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 642 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:148) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3540 (RS:0;5ed4808ef0e6:33745-longCompactions-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a8b6720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3541 (regionserver/5ed4808ef0e6:0.Chore.1): State: TIMED_WAITING Blocked count: 2 Waited count: 565 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3543 (regionserver/5ed4808ef0e6:0.logRoller): State: TIMED_WAITING Blocked count: 0 Waited count: 3199 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3545 (MemStoreFlusher.0): State: TIMED_WAITING Blocked count: 0 Waited count: 3201 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:274) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:77) app//org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:323) Thread 3544 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@237b99c1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3542 (regionserver/5ed4808ef0e6:0.leaseChecker): State: TIMED_WAITING Blocked count: 0 Waited count: 3199 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Thread 3548 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51900 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3549 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.util.ArrayDeque@566951c3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3550 (LeaseRenewer:jenkins.hfs.10@localhost:34141): State: TIMED_WAITING Blocked count: 10 Waited count: 331 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3551 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73c7bfc9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3560 (MiniHBaseClusterRegionServer-EventLoopGroup-24-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3561 (RS_OPEN_META-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@33092bee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3562 (DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51908 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010]): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) Thread 3563 (PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010, type=LAST_IN_PIPELINE): State: WAITING Blocked count: 24 Waited count: 25 Waiting on java.util.ArrayDeque@4cc309df Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3564 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.meta): State: WAITING Blocked count: 0 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5038aecd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3567 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1ecf6af4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3568 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3569 (HMaster-EventLoopGroup-23-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3570 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3571 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@303a142a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3572 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@25e18a26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3580 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@2db0d51d): State: TIMED_WAITING Blocked count: 5 Waited count: 639 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3581 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 3582 (pool-1206-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3583 (qtp1341535794-3583): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3584 (qtp1341535794-3584): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3585 (qtp1341535794-3585): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3586 (qtp1341535794-3586): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3587 (qtp1341535794-3587-acceptor-0@3dd8f85e-ServerConnector@5cf1b482{HTTP/1.1, (http/1.1)}{localhost:33319}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3588 (qtp1341535794-3588): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3589 (qtp1341535794-3589): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3590 (qtp1341535794-3590): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3591 (Session-HouseKeeper-1a5d7d7b-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3592 (pool-1207-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3599 (FSEditLogAsync): State: WAITING Blocked count: 1 Waited count: 200 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@501965e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3601 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3602 (IPC Server idle connection scanner for port 37411): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3604 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3607 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@16dcedcf): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3608 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3595 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2cab800b): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3593 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 107 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3594 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 31174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3597 (Block report processor): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3441d181 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 3603 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3600 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3610 (IPC Server handler 0 on default port 37411): State: TIMED_WAITING Blocked count: 4 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3611 (IPC Server handler 1 on default port 37411): State: TIMED_WAITING Blocked count: 0 Waited count: 449 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3612 (IPC Server handler 2 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 448 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3613 (IPC Server handler 3 on default port 37411): State: TIMED_WAITING Blocked count: 2 Waited count: 451 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3614 (IPC Server handler 4 on default port 37411): State: TIMED_WAITING Blocked count: 6 Waited count: 450 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3615 (pool-1212-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3617 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@45bccaf9): State: TIMED_WAITING Blocked count: 0 Waited count: 160 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3618 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@4915eba0): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3619 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@75ca663c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3620 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@1d86b1c2): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3621 (CacheReplicationMonitor(449845788)): State: 
TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 3631 (pool-1218-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3632 (qtp1777667352-3632): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3633 (qtp1777667352-3633-acceptor-0@34a75aa7-ServerConnector@5b981b4e{HTTP/1.1, (http/1.1)}{localhost:43351}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3634 (qtp1777667352-3634): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3635 (qtp1777667352-3635): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3636 (Session-HouseKeeper-67fcf623-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3637 (nioEventLoopGroup-20-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3638 
(org.apache.hadoop.util.JvmPauseMonitor$Monitor@64c83422): State: TIMED_WAITING Blocked count: 7 Waited count: 638 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3640 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 3641 (IPC Server idle connection scanner for port 41981): State: TIMED_WAITING Blocked count: 1 Waited count: 33 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 3643 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3646 (Command processor): State: WAITING Blocked count: 0 Waited count: 109 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20e00196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 3647 (BP-493474764-172.17.0.2-1732148448119 heartbeating to localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 177 Waited count: 283 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3648 (pool-1220-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3630 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@68bc995a): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3642 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 3639 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 3651 (IPC Server handler 0 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3652 (IPC Server handler 1 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3653 (IPC Server handler 2 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 320 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3654 (IPC Server handler 3 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 320 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3655 (IPC Server handler 4 on default port 41981): State: TIMED_WAITING Blocked count: 0 Waited count: 323 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 3649 (IPC Client (1040632728) connection to localhost/127.0.0.1:37411 from jenkins): State: TIMED_WAITING Blocked count: 139 Waited count: 140 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 3650 (IPC Parameter Sending Thread for localhost/127.0.0.1:37411): State: TIMED_WAITING Blocked count: 0 Waited count: 140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3660 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3661 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 3665 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3667 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2/current/BP-493474764-172.17.0.2-1732148448119): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3671 (pool-1215-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3675 (java.util.concurrent.ThreadPoolExecutor$Worker@f1b5be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3676 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 3678 (LeaseRenewer:jenkins@localhost:37411): State: TIMED_WAITING Blocked count: 10 Waited count: 338 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3683 (HMaster-EventLoopGroup-25-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3684 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3685 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2bffda6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3686 (zk-event-processor-pool-0): State: WAITING Blocked count: 16 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e6696af Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3688 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@6d2232ce Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3689 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61f34110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3690 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.Semaphore$NonfairSync@348ac3f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3691 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 127 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7e13e821 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3692 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3693 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2db016f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3694 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@795b2be5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3695 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1b2cea74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3696 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a1f5bd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3697 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34653): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2095fd85 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 3700 (MiniHBaseClusterRegionServer-EventLoopGroup-26-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3698 (M:0;5ed4808ef0e6:34653): State: TIMED_WAITING Blocked count: 87 Waited count: 253 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$559/0x00007f205ca8e370.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) app//org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) Thread 3734 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3152 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 3743 (MiniHBaseClusterRegionServer-EventLoopGroup-26-2): State: WAITING Blocked count: 1 Waited count: 2 Waiting on java.util.concurrent.CompletableFuture$Signaller@7cf3dec4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.fetchPeerAddresses(HBaseReplicationEndpoint.java:203) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.chooseSinks(HBaseReplicationEndpoint.java:211) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.reportBadSink(HBaseReplicationEndpoint.java:257) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.onReplicateWALEntryException(HBaseInterClusterReplicationEndpoint.java:558) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.lambda$replicateEntries$2(HBaseInterClusterReplicationEndpoint.java:541) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint$$Lambda$1181/0x00007f205cd0d2d0.accept(Unknown Source) app//org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) app//org.apache.hadoop.hbase.util.FutureUtils$$Lambda$432/0x00007f205c9c66f0.accept(Unknown Source) java.base@17.0.11/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) java.base@17.0.11/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) java.base@17.0.11/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) java.base@17.0.11/java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) Thread 3746 (AsyncFSWAL-0-hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData-prefix:5ed4808ef0e6,34653,1732148450571): State: WAITING Blocked count: 0 Waited count: 222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7cc8576d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3760 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 32 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3778 (HMaster-EventLoopGroup-25-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3779 (RegionServerTracker-0): State: WAITING Blocked count: 5 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74ad5a4d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3787 (regionserver/5ed4808ef0e6:0.procedureResultReporter): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c9804ef Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 3799 (MiniHBaseClusterRegionServer-EventLoopGroup-26-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3806 (region-location-0): State: WAITING Blocked count: 4 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@12554d2e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3808 (HMaster-EventLoopGroup-25-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3809 (Time-limited test-SendThread(127.0.0.1:49683)): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 3810 (Time-limited test-EventThread): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51c15b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 3811 (zk-event-processor-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4fa00c5e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3824 (RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 18 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23f3d639 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3843 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3844 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3859 (RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 23 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@f387c04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3862 (AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.rep): State: WAITING Blocked count: 0 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@32c721c4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3866 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 4 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@170f29bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3868 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3869 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: TIMED_WAITING Blocked count: 274 Waited count: 456 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) Thread 3870 (RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978): State: WAITING Blocked count: 0 Waited count: 24 Waiting on java.util.concurrent.CompletableFuture$Signaller@31911a48 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) Thread 3964 (Registry-endpoints-refresh-end-points): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher.mainLoop(RegistryEndpointsRefresher.java:92) app//org.apache.hadoop.hbase.client.RegistryEndpointsRefresher$$Lambda$805/0x00007f205cb8f300.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4191 (RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0-0): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76e4d6fd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4202 (Timer for 'DataNode' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 4277 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:09,373 DEBUG [M:0;5ed4808ef0e6:34653 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148468040Disabling compacts and flushes for region at 1732148468040Disabling writes for close at 1732148468040Obtaining lock to block concurrent updates at 1732148468041 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148468041Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=56847, getHeapSize=66424, getOffHeapSize=0, getCellsCount=114 at 1732148468041Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148468042 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148468042Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148468065 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148468065Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148468486 (+421 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148468501 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148468501Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148468910 (+409 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148468929 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148468929Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@151a293: reopening flushed file at 1732148469351 (+422 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c3631f4: reopening flushed file at 1732148469359 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5deb5b50: reopening flushed file at 1732148469365 (+6 ms)Flush failed: org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=191, WAL system stuck? 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) at org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) at org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) at java.base/java.lang.Thread.run(Thread.java:840) Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=191, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ... 20 more at 1732148769373 (+300008 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732148769373 2024-11-21T00:26:09,374 WARN [M:0;5ed4808ef0e6:34653 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.DroppedSnapshotException: region: master:store,,1.1595e783b53d99cd5eef43b6debb2682. at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3095) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=191, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) ~[classes/:3.0.0-beta-2-SNAPSHOT] ... 12 more Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=191, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1960) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$4(AbstractFSWAL.java:728) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:728) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:718) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.WALUtil.doFullMarkerAppendTransaction(WALUtil.java:175) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.WALUtil.writeFlushMarker(WALUtil.java:97) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3070) ~[classes/:3.0.0-beta-2-SNAPSHOT] ... 12 more 2024-11-21T00:26:09,458 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:26:14,221 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
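Each entry in the thread report above follows the same pattern: a thread id and name, a State, cumulative Blocked and Waited counts, an optional "Waiting on <lock>" reference, and the stack. Those fields map directly onto the standard java.lang.management API, so a report in roughly this shape can be reproduced with plain JDK calls. The sketch below only illustrates that mapping; it is not the dump utility this test harness actually uses, and the class name ThreadReport is invented for the example.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class ThreadReport {
        public static void main(String[] args) {
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            // Full stack traces for all live threads; monitor/synchronizer
            // ownership details are not requested, to keep the dump cheap.
            for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
                System.out.println("Thread " + info.getThreadId()
                    + " (" + info.getThreadName() + "):");
                System.out.println(" State: " + info.getThreadState());
                // Counts are cumulative since the thread started, which is why
                // long-lived idle pool workers show large Waited counts.
                System.out.println(" Blocked count: " + info.getBlockedCount());
                System.out.println(" Waited count: " + info.getWaitedCount());
                if (info.getLockName() != null) {
                    // Printed as "class@identityHashCode", matching the
                    // "Waiting on ..." lines in the report above.
                    System.out.println(" Waiting on " + info.getLockName());
                }
                System.out.println(" Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("  " + frame);
                }
            }
        }
    }

When reading such a report, a WAITING thread parked in LinkedBlockingQueue.take inside ThreadPoolExecutor.getTask is an idle pool worker waiting for work, not a stuck thread.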
2024-11-21T00:26:15,097 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:26:20,141 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:26:23,005 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:26:24,382 ERROR [M:0;5ed4808ef0e6:34653 {}] wal.AbstractWALProvider(289): cleanup WAL failed org.apache.hadoop.hbase.exceptions.TimeoutIOException: We have waited 15000ms, but the shutdown of WAL doesn't complete! Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.shutdown.wait.timeout.ms" at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1199) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.util.concurrent.TimeoutException at java.util.concurrent.FutureTask.get(FutureTask.java:204) ~[?:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) ~[classes/:?] ... 9 more 2024-11-21T00:26:24,382 WARN [M:0;5ed4808ef0e6:34653 {}] region.MasterRegion(142): Failed to shutdown WAL java.io.IOException: Failed to shutdown WALFactory at org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:347) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Suppressed: org.apache.hadoop.hbase.exceptions.TimeoutIOException: We have waited 15000ms, but the shutdown of WAL doesn't complete! 
Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.shutdown.wait.timeout.ms" at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1199) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.util.concurrent.TimeoutException at java.util.concurrent.FutureTask.get(FutureTask.java:204) ~[?:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) ~[classes/:?] ... 9 more 2024-11-21T00:26:24,382 INFO [M:0;5ed4808ef0e6:34653 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:26:24,382 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:26:24,382 INFO [M:0;5ed4808ef0e6:34653 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34653 2024-11-21T00:26:24,383 INFO [M:0;5ed4808ef0e6:34653 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:26:24,496 INFO [M:0;5ed4808ef0e6:34653 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:26:24,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:24,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34653-0x1015ac3b1060003, quorum=127.0.0.1:49683, baseZNode=/1-1254608113 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:24,520 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testHFileCyclicReplication Thread=438 (was 204) Potentially hanging thread: qtp1341535794-3590 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2/current/BP-913626249-172.17.0.2-1732148442480 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1/current/BP-913626249-172.17.0.2-1732148442480 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: MarkedDeleteBlockScrubberThread java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1341535794-3586 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PEWorker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) 
app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@1d86b1c2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1577786156-3379-acceptor-0@d163538-ServerConnector@4c8a0263{HTTP/1.1, (http/1.1)}{localhost:40451} java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 2 on default port 41981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1/current/BP-493474764-172.17.0.2-1732148448119 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:37411/user/jenkins/test-data/72179211-86ca-128d-ca8c-b6786d708911/MasterData-prefix:5ed4808ef0e6,34653,1732148450571 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Socket Reader #1 for port 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Potentially hanging thread: IPC Server handler 0 on default port 39975 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@4915eba0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: BootstrapNodeManager java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.util.JvmPauseMonitor$Monitor@64c83422 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Session-HouseKeeper-67fcf623-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RedundancyMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;5ed4808ef0e6:33745 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:906) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) java.base@17.0.11/java.security.AccessController.executePrivileged(AccessController.java:776) java.base@17.0.11/java.security.AccessController.doPrivileged(AccessController.java:399) java.base@17.0.11/javax.security.auth.Subject.doAs(Subject.java:376) 
app//org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) app//org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-23-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins.hfs.10 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-25-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 1 on default port 34141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: LruBlockCacheStatsExecutor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:34141 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: NIOWorkerThread-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1377509192-3335 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) Potentially hanging thread: qtp1377509192-3329 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server Responder java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Potentially hanging thread: qtp1577786156-3380 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: IPC Server handler 1 on default port 41981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: Time-limited test.LruBlockCache.EvictionThread java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.io.hfile.LruBlockCache$EvictionThread.run(LruBlockCache.java:957) Potentially hanging thread: pool-1220-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@687e5a89 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: FsDatasetAsyncDiskServiceFixer java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Potentially hanging thread: FSEditLogAsync java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: qtp1341535794-3588 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-25-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_742485806_20 at /127.0.0.1:51882 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: CacheReplicationMonitor(449845788) java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-24-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1377509192-3333-acceptor-0@5c3eb09e-ServerConnector@78a383c6{HTTP/1.1, (http/1.1)}{localhost:41239} java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: NIOWorkerThread-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: BP-913626249-172.17.0.2-1732148442480 heartbeating to localhost/127.0.0.1:34141 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@45bccaf9 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 0 on default port 34141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: Hadoop-Metrics-Updater-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1577786156-3381 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-1206-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 4 on default port 34141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: pool-1112-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 2 on default port 39975 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: IPC Server idle connection scanner for port 37411 java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: pool-1207-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: BP-493474764-172.17.0.2-1732148448119 heartbeating to localhost/127.0.0.1:37411 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Block report processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Potentially hanging thread: ProcExecTimeout java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:274) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Potentially hanging thread: RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.shipper5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.parallelReplicate(HBaseInterClusterReplicationEndpoint.java:402) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.replicate(HBaseInterClusterReplicationEndpoint.java:460) app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:185) 
app//org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) Potentially hanging thread: HMaster-EventLoopGroup-23-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@7d8449d8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-24-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@2cab800b java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 4 on default port 39975 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data2) java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Potentially hanging thread: org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@86acec8 java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1777667352-3635 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: qtp1777667352-3632 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.util.JvmPauseMonitor$Monitor@5aa0588f java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Session-HouseKeeper-2ecd32c0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1341535794-3589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 3 on default port 34141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1377509192-3332 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: snapshot-hfile-cleaner-cache-refresher java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Server listener on 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Potentially hanging thread: VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2f1b7b47-b6c9-9858-1afe-4980137f5e7f/cluster_2584f223-84da-9ca6-4212-3365cf928ecd/data/data1) java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Potentially hanging thread: master:store-WAL-Roller java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Potentially hanging thread: VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data1) java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Potentially hanging thread: IPC Server Responder java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Potentially hanging thread: MarkedDeleteBlockScrubberThread java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1341535794-3585 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@16dcedcf java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 5ed4808ef0e6:35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.waitOnAssignQueue(AssignmentManager.java:2390) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.processAssignQueue(AssignmentManager.java:2412) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager$1.run(AssignmentManager.java:2352) Potentially hanging thread: IPC Server handler 4 on default port 37411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) 
Potentially hanging thread: IPC Server handler 3 on default port 37411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: Socket Reader #1 for port 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Potentially hanging thread: IPC Server listener on 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Potentially hanging thread: WAL-Shutdown-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitUninterruptibly(AbstractQueuedSynchronizer.java:1580) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.waitForSafePoint(AbstractFSWAL.java:2022) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2106) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: FsDatasetAsyncDiskServiceFixer java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Potentially hanging thread: IPC Server listener on 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Potentially hanging thread: 
RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 3 on default port 39975 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: IPC Server handler 0 on default port 41981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:49683) java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:49683) java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RequestThrottler java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51908 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) 
app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-1106-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcessThread(sid:0 cport:49683): java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DatanodeAdminMonitor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: CacheReplicationMonitor(677237174) java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Potentially hanging thread: FSEditLogAsync java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1377509192-3334 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ConnnectionExpirer java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Potentially hanging thread: IPC Server handler 4 on default port 41981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: MemStoreFlusher.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:274) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:77) app//org.apache.hadoop.hbase.regionserver.MemStoreFlusher$FlushHandler.run(MemStoreFlusher.java:323) Potentially hanging thread: IPC Server idle connection scanner for port 41981 java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: NIOWorkerThread-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-25-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741834_1010, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.util.JvmPauseMonitor$Monitor@5f770010 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1377509192-3336 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PEWorker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37411 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:49683 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Potentially hanging thread: IPC Server idle connection scanner for port 39975 java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2) java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:37411 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/cluster_389f2191-0d73-949e-50d4-27e66144b44a/data/data2/current/BP-493474764-172.17.0.2-1732148448119 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1341535794-3583 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Block report processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Potentially hanging thread: org.apache.hadoop.util.JvmPauseMonitor$Monitor@2db0d51d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/MasterData-prefix:5ed4808ef0e6,35861,1732148444638 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1341535794-3584 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
java.util.concurrent.ThreadPoolExecutor$Worker@f1b5be8[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: Socket Reader #1 for port 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Potentially hanging thread: DatanodeAdminMonitor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@75ca663c java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOServerCxnFactory.SelectorThread-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Potentially hanging thread: ProcedureDispatcherTimeoutThread java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:265) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread.run(RemoteProcedureDispatcher.java:328) Potentially hanging thread: LeaseRenewer:jenkins.hfs.10@localhost:34141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 3 on default port 41981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: PEWorker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Potentially hanging thread: pool-1218-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: NIOWorkerThread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: normalizer-worker-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue.take(RegionNormalizerWorkQueue.java:146) app//org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker.run(RegionNormalizerWorker.java:191) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 2 on default port 37411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: SyncThread:0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Potentially hanging thread: NIOServerCxnFactory.SelectorThread-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Potentially hanging thread: master/5ed4808ef0e6:0.Chore.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@24b24059 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server Responder java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Potentially hanging thread: pool-1114-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master:store-Flusher java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor.flushLoop(MasterRegionFlusherAndCompactor.java:200) app//org.apache.hadoop.hbase.master.region.MasterRegionFlusherAndCompactor$$Lambda$479/0x00007f205c9f9bd8.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-1109-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Hadoop-Metrics-Updater-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34141 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RedundancyMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Hadoop-Metrics-Updater-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-24-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: OldWALsCleaner-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.deleteFile(LogCleaner.java:172) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner.lambda$createOldWalsCleaner$1(LogCleaner.java:152) app//org.apache.hadoop.hbase.master.cleaner.LogCleaner$$Lambda$598/0x00007f205cacf298.run(Unknown Source) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-23-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-1215-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:49683) java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Potentially hanging thread: RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: PEWorker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Potentially hanging thread: IPC Server handler 1 on default port 39975 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: IPC Server handler 1 on default port 37411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: regionserver/5ed4808ef0e6:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: regionserver/5ed4808ef0e6:0.Chore.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@a75ffca java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1777667352-3634 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;5ed4808ef0e6:33745-longCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1341535794-3587-acceptor-0@3dd8f85e-ServerConnector@5cf1b482{HTTP/1.1, (http/1.1)}{localhost:33319} java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/5ed4808ef0e6:0.logRoller java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Potentially hanging thread: Socket Reader #1 for port 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Potentially hanging thread: IPC Server idle connection scanner for port 34141 java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1066463267_20 at /127.0.0.1:51900 [Receiving block BP-913626249-172.17.0.2-1732148442480:blk_1073741832_1008] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MobFileCache #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-26-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.CompletableFuture$Signaller.block(CompletableFuture.java:1864) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.CompletableFuture.waitingGet(CompletableFuture.java:1898) java.base@17.0.11/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2072) app//org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:182) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.fetchPeerAddresses(HBaseReplicationEndpoint.java:203) 
app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.chooseSinks(HBaseReplicationEndpoint.java:211) app//org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.reportBadSink(HBaseReplicationEndpoint.java:257) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.onReplicateWALEntryException(HBaseInterClusterReplicationEndpoint.java:558) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.lambda$replicateEntries$2(HBaseInterClusterReplicationEndpoint.java:541) app//org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint$$Lambda$1181/0x00007f205cd0d2d0.accept(Unknown Source) app//org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) app//org.apache.hadoop.hbase.util.FutureUtils$$Lambda$432/0x00007f205c9c66f0.accept(Unknown Source) java.base@17.0.11/java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) java.base@17.0.11/java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) java.base@17.0.11/java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) java.base@17.0.11/java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) app//org.apache.hadoop.hbase.client.AsyncRegionServerAdmin$1.run(AsyncRegionServerAdmin.java:108) app//org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) app//org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) app//org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) app//org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) app//org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) app//org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) app//org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) app//org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.userEventTriggered(BufferCallBeforeInitHandler.java:106) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:398) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:376) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireUserEventTriggered(AbstractChannelHandlerContext.java:368) app//org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.userEventTriggered(DefaultChannelPipeline.java:1375) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:396) app//org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeUserEventTriggered(AbstractChannelHandlerContext.java:376) app//org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireUserEventTriggered(DefaultChannelPipeline.java:862) app//org.apache.hadoop.hbase.ipc.NettyRpcConnection.failInit(NettyRpcConnection.java:210) app//org.apache.hadoop.hbase.ipc.NettyRpcConnection$2.fail(NettyRpcConnection.java:414) app//org.apache.hadoop.hbase.ipc.NettyRpcConnection$2.operationComplete(NettyRpcConnection.java:421) app//org.apache.hadoop.hbase.ipc.NettyRpcConnection$2.operationComplete(NettyRpcConnection.java:389) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setFailure0(DefaultPromise.java:629) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.tryFailure(DefaultPromise.java:118) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:679) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:698) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1577786156-3378 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@68bc995a java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a-prefix:5ed4808ef0e6,33745,1732148444978.rep java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:37411 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Server handler 0 on default port 37411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@6d2378c3[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148447068 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$2.run(HFileCleaner.java:269) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37411 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RegionServerTracker-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp1377509192-3331 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server Responder java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Potentially hanging thread: Session-HouseKeeper-1a5d7d7b-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-1100-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 2 on default port 34141 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: M:0;5ed4808ef0e6:35861 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:625) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@3dd894f0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: qtp1777667352-3633-acceptor-0@34a75aa7-ServerConnector@5b981b4e{HTTP/1.1, (http/1.1)}{localhost:43351} java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-26-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PEWorker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:167) app//org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler.poll(AbstractProcedureScheduler.java:149) app//org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2176) Potentially hanging thread: Hadoop-Metrics-Updater-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-1212-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server listener on 0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Potentially hanging thread: qtp1377509192-3330 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Session-HouseKeeper-327d7abf-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148447068 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$1.run(HFileCleaner.java:254) Potentially hanging thread: pool-1101-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33745 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6f4c8732 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOWorkerThread-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:49683) java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Potentially hanging thread: JvmPauseMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hbase.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:148) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-26-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: PacketResponder: BP-913626249-172.17.0.2-1732148442480:blk_1073741830_1006, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: WorkerMonitor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.DelayQueue.poll(DelayQueue.java:279) app//org.apache.hadoop.hbase.procedure2.util.DelayedUtil.takeWithoutInterrupt(DelayedUtil.java:81) app//org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread.run(TimeoutExecutorThread.java:56) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35861 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) - Thread LEAK? -, OpenFileDescriptor=718 (was 570) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=645 (was 746), ProcessCount=11 (was 11), AvailableMemoryMB=1882 (was 824) - AvailableMemoryMB LEAK? - 2024-11-21T00:26:24,563 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testCyclicReplication1 Thread=438, OpenFileDescriptor=718, MaxFileDescriptor=1048576, SystemLoadAverage=645, ProcessCount=11, AvailableMemoryMB=1878 2024-11-21T00:26:24,584 INFO [Time-limited test {}] replication.TestMasterReplication(148): testSimplePutDelete 2024-11-21T00:26:24,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.log.dir so I do NOT create it in target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496 2024-11-21T00:26:24,585 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:26:24,585 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.tmp.dir so I do NOT create it in target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496 2024-11-21T00:26:24,585 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/682bab2f-bd96-c02d-7367-f08e2f62d5ce/hadoop.tmp.dir Erasing configuration value by system value. 
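The ResourceChecker lines above bracket each test with before/after counters (Thread=438, OpenFileDescriptor=718, SystemLoadAverage, ProcessCount, AvailableMemoryMB) and dump a "Potentially hanging thread:" stack for every thread still alive when the counts rise. As an illustrative, non-authoritative sketch only (not HBase's actual ResourceChecker), the same kind of before/after accounting can be done with plain JDK APIs; the class and method names below are made up for the example.

    // Hypothetical sketch of before/after resource accounting in the spirit of the
    // ResourceChecker output above. Only standard JDK APIs are used; ResourceSnapshot
    // and diffAgainst are invented names for this illustration.
    import java.lang.management.ManagementFactory;
    import java.lang.management.OperatingSystemMXBean;
    import java.util.Map;

    public final class ResourceSnapshot {
      final int threadCount;
      final long openFileDescriptors; // -1 if the platform bean does not expose it

      private ResourceSnapshot(int threads, long fds) {
        this.threadCount = threads;
        this.openFileDescriptors = fds;
      }

      static ResourceSnapshot take() {
        int threads = Thread.getAllStackTraces().size();
        long fds = -1;
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        if (os instanceof com.sun.management.UnixOperatingSystemMXBean unix) {
          fds = unix.getOpenFileDescriptorCount();
        }
        return new ResourceSnapshot(threads, fds);
      }

      /** Print lines in the same spirit as "Thread=438 (was 397)". */
      void diffAgainst(ResourceSnapshot before) {
        System.out.printf("Thread=%d (was %d)%n", threadCount, before.threadCount);
        System.out.printf("OpenFileDescriptor=%d (was %d)%n",
            openFileDescriptors, before.openFileDescriptors);
        if (threadCount > before.threadCount) {
          // Dump the surviving threads' stacks, like the "Potentially hanging thread:" sections.
          for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
            System.out.println("Potentially hanging thread: " + e.getKey().getName());
            for (StackTraceElement frame : e.getValue()) {
              System.out.println("    " + frame);
            }
          }
        }
      }
    }

In use, a snapshot would be taken in a before-test hook and diffAgainst called in the matching after-test hook; the real ResourceChecker additionally tracks load average, process count and available memory, as the summary line above shows.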
2024-11-21T00:26:24,585 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496 2024-11-21T00:26:24,585 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb, deleteOnExit=true 2024-11-21T00:26:24,681 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb/zookeeper_0, clientPort=57893, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:26:24,684 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57893 2024-11-21T00:26:24,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:26:24,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:26:24,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/test.cache.data in system properties and HBase conf 2024-11-21T00:26:24,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:26:24,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:26:24,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:26:24,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/mapreduce.cluster.temp.dir in system properties and HBase conf 
2024-11-21T00:26:24,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:26:24,685 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/nfs.dump.dir in system 
properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:26:24,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:26:24,687 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:26:25,402 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:25,415 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:26:25,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:26:25,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:26:25,465 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:26:25,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:25,472 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f5049d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:26:25,473 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d76cdd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:26:25,618 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@29bd1fb9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/java.io.tmpdir/jetty-localhost-43803-hadoop-hdfs-3_4_1-tests_jar-_-any-4178142380642408899/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:26:25,620 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6006c435{HTTP/1.1, (http/1.1)}{localhost:43803} 2024-11-21T00:26:25,621 INFO [Time-limited test {}] server.Server(415): Started @520170ms 2024-11-21T00:26:26,252 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:26,258 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:26:26,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:26:26,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:26:26,292 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:26:26,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@147350c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:26:26,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c0d6b7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:26:26,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7017af6e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/java.io.tmpdir/jetty-localhost-46001-hadoop-hdfs-3_4_1-tests_jar-_-any-5241365644798028827/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:26:26,454 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@b1a67c{HTTP/1.1, (http/1.1)}{localhost:46001} 2024-11-21T00:26:26,454 INFO [Time-limited test {}] server.Server(415): Started @521004ms 2024-11-21T00:26:26,456 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:26:27,371 WARN [Thread-2226 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb/data/data1/current/BP-1178125728-172.17.0.2-1732148784711/current, will proceed with Du for space computation calculation, 2024-11-21T00:26:27,380 WARN [Thread-2227 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb/data/data2/current/BP-1178125728-172.17.0.2-1732148784711/current, will proceed with Du for space computation calculation, 2024-11-21T00:26:27,513 WARN [Thread-2214 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:26:27,526 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x71d355cdbd8d4547 with lease ID 0xa9fd4612e38392be: Processing first storage report for DS-6e5bc986-7d7f-441a-a6dc-2b7c4a3e8340 from datanode DatanodeRegistration(127.0.0.1:44835, datanodeUuid=bd921b04-f6f2-4edf-972b-a6794fdfc30e, infoPort=46821, infoSecurePort=0, ipcPort=34619, storageInfo=lv=-57;cid=testClusterID;nsid=1837078286;c=1732148784711) 2024-11-21T00:26:27,526 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71d355cdbd8d4547 with lease ID 0xa9fd4612e38392be: from storage DS-6e5bc986-7d7f-441a-a6dc-2b7c4a3e8340 node DatanodeRegistration(127.0.0.1:44835, datanodeUuid=bd921b04-f6f2-4edf-972b-a6794fdfc30e, infoPort=46821, infoSecurePort=0, ipcPort=34619, storageInfo=lv=-57;cid=testClusterID;nsid=1837078286;c=1732148784711), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:26:27,526 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x71d355cdbd8d4547 with lease ID 0xa9fd4612e38392be: Processing first storage report for DS-9e4599cd-250b-4d2f-9f96-162efd8f3e16 from datanode DatanodeRegistration(127.0.0.1:44835, datanodeUuid=bd921b04-f6f2-4edf-972b-a6794fdfc30e, infoPort=46821, infoSecurePort=0, ipcPort=34619, storageInfo=lv=-57;cid=testClusterID;nsid=1837078286;c=1732148784711) 2024-11-21T00:26:27,526 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71d355cdbd8d4547 with lease ID 0xa9fd4612e38392be: from storage DS-9e4599cd-250b-4d2f-9f96-162efd8f3e16 node DatanodeRegistration(127.0.0.1:44835, datanodeUuid=bd921b04-f6f2-4edf-972b-a6794fdfc30e, infoPort=46821, infoSecurePort=0, ipcPort=34619, storageInfo=lv=-57;cid=testClusterID;nsid=1837078286;c=1732148784711), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:26:27,576 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496 
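The entries above show the usual bring-up order for this test: a MiniZooKeeperCluster on clientPort=57893, a one-datanode mini DFS with its block reports, then hbase.rootdir pointed at the test data directory. As a hedged sketch only, driving the same bring-up from test code with HBaseTestingUtil might look roughly like the following; the builder setter names are assumptions inferred from the StartMiniClusterOption fields printed in the log, not verified signatures.

    // Hedged sketch: start and stop a mini cluster the way the log above does.
    // Assumes HBaseTestingUtil and StartMiniClusterOption from hbase-testing-util
    // (branch-3 naming); treat the builder method names as assumptions.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)        // matches numMasters=1 in the logged option
            .numRegionServers(1)  // matches numRegionServers=1
            .numDataNodes(1)      // matches numDataNodes=1
            .build();
        util.startMiniCluster(option);   // brings up ZK, DFS and HBase as in the log
        try {
          // test body would go here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }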
2024-11-21T00:26:27,577 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:27,579 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:27,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:26:27,671 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4 with version=8 2024-11-21T00:26:27,671 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/hbase-staging 2024-11-21T00:26:27,675 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:26:27,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:27,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:27,676 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:26:27,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:27,677 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:26:27,677 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:26:27,677 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:26:27,692 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38531 2024-11-21T00:26:27,693 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38531 connecting to ZooKeeper ensemble=127.0.0.1:57893 2024-11-21T00:26:27,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385310x0, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:26:27,823 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38531-0x1015ac8e9d30000 connected 2024-11-21T00:26:28,053 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
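The RpcExecutor lines above describe the server-side call queues, e.g. default.FPBQ.Fifo with queueClass=java.util.concurrent.LinkedBlockingQueue, numCallQueues=1, maxQueueLength=30, handlerCount=3: three handler threads draining one bounded FIFO queue. The following is only a generic JDK sketch of that producer/consumer shape, not HBase's RpcExecutor; Call here is a hypothetical stand-in for the real CallRunner.

    // Generic sketch of the queue/handler shape logged above: N handler threads
    // draining one bounded LinkedBlockingQueue of queued calls. Not HBase code.
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class FifoHandlerSketch {
      interface Call { void run(); }

      public static void main(String[] args) throws InterruptedException {
        final int maxQueueLength = 30;   // maxQueueLength=30 in the log
        final int handlerCount = 3;      // handlerCount=3 in the log
        BlockingQueue<Call> callQueue = new LinkedBlockingQueue<>(maxQueueLength);

        for (int i = 0; i < handlerCount; i++) {
          Thread handler = new Thread(() -> {
            try {
              while (true) {
                callQueue.take().run();   // block until a call is queued, then execute it
              }
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();  // exit the handler loop on interrupt
            }
          }, "default.FPBQ.Fifo.handler=" + i);
          handler.setDaemon(true);
          handler.start();
        }

        // A reader thread would offer() incoming calls; offer() returning false means the
        // bounded queue is full, which the real server surfaces as a call-queue-too-big error.
        callQueue.offer(() -> System.out.println("hello from a queued call"));
        Thread.sleep(100);  // give a handler time to run the call before main exits
      }
    }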
2024-11-21T00:26:28,054 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:28,064 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on znode that does not yet exist, /01745974643/running 2024-11-21T00:26:28,064 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4, hbase.cluster.distributed=false 2024-11-21T00:26:28,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on znode that does not yet exist, /01745974643/acl 2024-11-21T00:26:28,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38531 2024-11-21T00:26:28,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38531 2024-11-21T00:26:28,120 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38531 2024-11-21T00:26:28,139 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38531 2024-11-21T00:26:28,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38531 2024-11-21T00:26:28,186 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:26:28,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:28,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:28,186 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:26:28,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:28,186 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:26:28,186 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:26:28,186 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:26:28,192 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43823 2024-11-21T00:26:28,193 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:43823 connecting to ZooKeeper ensemble=127.0.0.1:57893 2024-11-21T00:26:28,194 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:28,197 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:28,225 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:438230x0, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on znode that does not yet exist, /01745974643/running 2024-11-21T00:26:28,225 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:26:28,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:438230x0, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:26:28,233 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:26:28,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:438230x0, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on znode that does not yet exist, /01745974643/master 2024-11-21T00:26:28,235 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:438230x0, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on znode that does not yet exist, /01745974643/acl 2024-11-21T00:26:28,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43823 2024-11-21T00:26:28,272 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43823-0x1015ac8e9d30001 connected 2024-11-21T00:26:28,280 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43823 2024-11-21T00:26:28,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43823 2024-11-21T00:26:28,321 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43823 2024-11-21T00:26:28,329 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43823 2024-11-21T00:26:28,353 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:38531 2024-11-21T00:26:28,356 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /01745974643/backup-masters/5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:28,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643/backup-masters 2024-11-21T00:26:28,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643/backup-masters 
2024-11-21T00:26:28,368 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on existing znode=/01745974643/backup-masters/5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:28,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01745974643/master 2024-11-21T00:26:28,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:28,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:28,380 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on existing znode=/01745974643/master 2024-11-21T00:26:28,380 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /01745974643/backup-masters/5ed4808ef0e6,38531,1732148787674 from backup master directory 2024-11-21T00:26:28,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643/backup-masters 2024-11-21T00:26:28,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01745974643/backup-masters/5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:28,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643/backup-masters 2024-11-21T00:26:28,389 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:26:28,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:28,414 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/hbase.id] with ID: 5e753136-7b83-4eba-87dd-378bf22a8590 2024-11-21T00:26:28,414 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/.tmp/hbase.id 2024-11-21T00:26:28,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:26:28,461 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/.tmp/hbase.id]:[hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/hbase.id] 2024-11-21T00:26:28,473 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:28,473 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:26:28,475 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 2024-11-21T00:26:28,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:28,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:28,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:26:28,628 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:26:28,629 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:26:28,637 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:28,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:26:28,692 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store 2024-11-21T00:26:28,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:26:28,764 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:28,764 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:26:28,764 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:26:28,764 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:28,765 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-21T00:26:28,765 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:28,765 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:28,766 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148788764Disabling compacts and flushes for region at 1732148788764Disabling writes for close at 1732148788765 (+1 ms)Writing region close event to WAL at 1732148788765Closed at 1732148788765 2024-11-21T00:26:28,767 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/.initializing 2024-11-21T00:26:28,767 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/WALs/5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:28,768 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:26:28,778 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C38531%2C1732148787674, suffix=, logDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/WALs/5ed4808ef0e6,38531,1732148787674, archiveDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/oldWALs, maxLogs=10 2024-11-21T00:26:28,806 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/WALs/5ed4808ef0e6,38531,1732148787674/5ed4808ef0e6%2C38531%2C1732148787674.1732148788778, exclude list is [], retry=0 2024-11-21T00:26:28,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44835,DS-6e5bc986-7d7f-441a-a6dc-2b7c4a3e8340,DISK] 2024-11-21T00:26:28,857 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/WALs/5ed4808ef0e6,38531,1732148787674/5ed4808ef0e6%2C38531%2C1732148787674.1732148788778 2024-11-21T00:26:28,875 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46821:46821)] 2024-11-21T00:26:28,875 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:28,875 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:28,876 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,876 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:26:28,886 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:28,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:28,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 
columnFamilyName proc 2024-11-21T00:26:28,889 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:28,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:28,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:26:28,891 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:28,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:28,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:26:28,893 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:28,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:28,894 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,895 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,896 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,897 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,897 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:26:28,899 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:28,916 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:28,917 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66500592, jitterRate=-0.009063959121704102}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:26:28,917 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148788876Initializing all the Stores at 1732148788877 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148788877Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1732148788879 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148788879Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148788879Cleaning up temporary data from old regions at 1732148788897 (+18 ms)Region opened successfully at 1732148788917 (+20 ms) 2024-11-21T00:26:28,928 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:26:28,937 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55aeb215, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:26:28,938 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:26:28,938 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:26:28,938 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:26:28,939 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:26:28,939 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:26:28,940 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:26:28,940 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:26:28,958 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:26:28,959 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Unable to get data of znode /01745974643/balancer because node does not exist (not necessarily an error) 2024-11-21T00:26:28,978 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01745974643/balancer already deleted, retry=false 2024-11-21T00:26:28,979 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:26:28,980 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Unable to get data of znode /01745974643/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:26:28,989 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01745974643/normalizer already deleted, retry=false 2024-11-21T00:26:28,989 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:26:28,990 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Unable to get data of znode /01745974643/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:26:28,999 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01745974643/switch/split already deleted, retry=false 2024-11-21T00:26:29,001 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Unable to get data of znode /01745974643/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:26:29,010 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01745974643/switch/merge already deleted, retry=false 2024-11-21T00:26:29,013 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Unable to get data of znode /01745974643/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:26:29,021 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01745974643/snapshot-cleanup already deleted, retry=false 2024-11-21T00:26:29,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01745974643/running 2024-11-21T00:26:29,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01745974643/running 2024-11-21T00:26:29,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:29,035 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:29,039 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,38531,1732148787674, sessionid=0x1015ac8e9d30000, setting cluster-up flag (Was=false) 2024-11-21T00:26:29,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:29,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:29,094 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01745974643/flush-table-proc/acquired, /01745974643/flush-table-proc/reached, /01745974643/flush-table-proc/abort 2024-11-21T00:26:29,096 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:29,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:29,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:29,147 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01745974643/online-snapshot/acquired, /01745974643/online-snapshot/reached, /01745974643/online-snapshot/abort 2024-11-21T00:26:29,148 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:29,162 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:26:29,183 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:29,183 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:26:29,183 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of 
multiplier of cost functions = 0.0 etc. 2024-11-21T00:26:29,183 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,38531,1732148787674 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:26:29,186 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,216 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:29,216 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:26:29,217 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:29,217 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:26:29,270 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148819270 2024-11-21T00:26:29,270 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:26:29,270 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:26:29,270 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:26:29,271 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:26:29,271 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:26:29,271 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:26:29,281 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:26:29,287 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:26:29,287 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:26:29,287 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:26:29,287 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:26:29,303 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:26:29,303 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:26:29,317 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148789303,5,FailOnTimeoutGroup] 2024-11-21T00:26:29,353 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148789317,5,FailOnTimeoutGroup] 2024-11-21T00:26:29,353 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,353 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:26:29,353 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,353 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:26:29,353 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(746): ClusterId : 5e753136-7b83-4eba-87dd-378bf22a8590 2024-11-21T00:26:29,353 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:26:29,373 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:26:29,373 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:26:29,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:26:29,380 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:26:29,380 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4 2024-11-21T00:26:29,393 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:26:29,393 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@689f91f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:26:29,434 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:43823 2024-11-21T00:26:29,435 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.RegionServerCoprocessorHost(66): 
System coprocessor loading is enabled 2024-11-21T00:26:29,435 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:26:29,435 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:26:29,436 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,38531,1732148787674 with port=43823, startcode=1732148788181 2024-11-21T00:26:29,436 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:26:29,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:26:29,456 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:29,504 INFO [HMaster-EventLoopGroup-27-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44603, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.12 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:26:29,505 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38531 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:29,505 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38531 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:29,507 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4 2024-11-21T00:26:29,507 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41751 2024-11-21T00:26:29,507 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:26:29,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:26:29,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643/rs 2024-11-21T00:26:29,516 DEBUG [RS:0;5ed4808ef0e6:43823 {}] zookeeper.ZKUtil(111): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on existing znode=/01745974643/rs/5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:29,516 WARN [RS:0;5ed4808ef0e6:43823 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:26:29,516 INFO [RS:0;5ed4808ef0e6:43823 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:29,516 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:29,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:26:29,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:29,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:29,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:26:29,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:26:29,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:29,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:29,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:26:29,525 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:26:29,525 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:29,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:29,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:26:29,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:26:29,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:29,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:29,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:26:29,531 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740 2024-11-21T00:26:29,531 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740 2024-11-21T00:26:29,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:26:29,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:26:29,533 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:26:29,534 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:26:29,537 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,43823,1732148788181] 2024-11-21T00:26:29,553 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:26:29,565 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:29,565 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64699325, jitterRate=-0.03590492904186249}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:26:29,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148789456Initializing all the Stores at 1732148789457 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148789457Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148789508 (+51 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148789508Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148789508Cleaning up temporary data from old regions at 1732148789532 (+24 ms)Region opened successfully at 1732148789566 (+34 ms) 2024-11-21T00:26:29,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:26:29,566 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:26:29,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:26:29,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock 
on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:26:29,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:26:29,577 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:26:29,593 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:26:29,593 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148789566Disabling compacts and flushes for region at 1732148789566Disabling writes for close at 1732148789566Writing region close event to WAL at 1732148789593 (+27 ms)Closed at 1732148789593 2024-11-21T00:26:29,594 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:29,594 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:26:29,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:26:29,596 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:26:29,597 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:26:29,608 INFO [RS:0;5ed4808ef0e6:43823 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:26:29,608 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,617 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:26:29,619 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:26:29,619 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
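The CompactionConfiguration(183) dumps and the PressureAwareCompactionThroughputController(131) entry above report the compaction knobs this test runs with (ratio 1.2, min/max files 3/10, minCompactSize 128 MB, major period 7 days with 0.5 jitter, throughput bounded between 50 and 100 MB/s). A minimal sketch of setting the same values programmatically follows; the property keys are the conventional HBase names and are assumptions relative to this log, so verify them against the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: reproducing the values reported by CompactionConfiguration(183)
    // and PressureAwareCompactionThroughputController(131) above.
    public class CompactionTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        // Throughput controller bounds, in bytes per second
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        return conf;
      }
    }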
2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,619 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,620 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:29,620 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:26:29,620 DEBUG [RS:0;5ed4808ef0e6:43823 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:26:29,645 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,645 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,645 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,645 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
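Each "Chore ScheduledChore name=... is enabled" entry above corresponds to a periodic task registered with the region server's ChoreService. As a rough illustration of that mechanism only (ChoreService and ScheduledChore are internal HBase classes, and this code is not taken from the test), a custom chore is scheduled the same way:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    // Illustrative sketch: a periodic task registered the way the region server
    // registers CompactionChecker, MemstoreFlusherChore, nonceCleaner, etc.
    public class HeartbeatChore extends ScheduledChore {
      HeartbeatChore(Stoppable stopper) {
        super("HeartbeatChore", stopper, 1000); // run every 1000 ms
      }

      @Override
      protected void chore() {
        System.out.println("heartbeat at " + System.currentTimeMillis());
      }

      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(new HeartbeatChore(stopper));
        Thread.sleep(5000);
        service.shutdown();
      }
    }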
2024-11-21T00:26:29,645 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,645 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,43823,1732148788181-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:26:29,677 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:26:29,677 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,43823,1732148788181-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,677 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,678 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.Replication(171): 5ed4808ef0e6,43823,1732148788181 started 2024-11-21T00:26:29,698 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:29,699 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,43823,1732148788181, RpcServer on 5ed4808ef0e6/172.17.0.2:43823, sessionid=0x1015ac8e9d30001 2024-11-21T00:26:29,699 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:26:29,699 DEBUG [RS:0;5ed4808ef0e6:43823 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:29,699 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,43823,1732148788181' 2024-11-21T00:26:29,699 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01745974643/flush-table-proc/abort' 2024-11-21T00:26:29,700 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01745974643/flush-table-proc/acquired' 2024-11-21T00:26:29,700 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:26:29,700 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:26:29,700 DEBUG [RS:0;5ed4808ef0e6:43823 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:29,700 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,43823,1732148788181' 2024-11-21T00:26:29,701 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01745974643/online-snapshot/abort' 2024-11-21T00:26:29,701 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01745974643/online-snapshot/acquired' 2024-11-21T00:26:29,701 DEBUG [RS:0;5ed4808ef0e6:43823 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:26:29,701 INFO [RS:0;5ed4808ef0e6:43823 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:26:29,701 INFO [RS:0;5ed4808ef0e6:43823 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:26:29,747 WARN [5ed4808ef0e6:38531 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-21T00:26:29,802 INFO [RS:0;5ed4808ef0e6:43823 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:26:29,803 INFO [RS:0;5ed4808ef0e6:43823 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C43823%2C1732148788181, suffix=, logDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181, archiveDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/oldWALs, maxLogs=10 2024-11-21T00:26:29,828 DEBUG [RS:0;5ed4808ef0e6:43823 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, exclude list is [], retry=0 2024-11-21T00:26:29,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44835,DS-6e5bc986-7d7f-441a-a6dc-2b7c4a3e8340,DISK] 2024-11-21T00:26:29,885 INFO [RS:0;5ed4808ef0e6:43823 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 2024-11-21T00:26:29,904 DEBUG [RS:0;5ed4808ef0e6:43823 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46821:46821)] 2024-11-21T00:26:29,997 DEBUG [5ed4808ef0e6:38531 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:26:29,998 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:29,999 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,43823,1732148788181, state=OPENING 2024-11-21T00:26:30,188 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:26:30,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:30,261 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:30,262 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01745974643/meta-region-server: CHANGED 2024-11-21T00:26:30,268 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01745974643/meta-region-server: CHANGED 2024-11-21T00:26:30,270 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:26:30,271 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,43823,1732148788181}] 2024-11-21T00:26:30,433 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:26:30,445 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55585, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:26:30,467 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:26:30,467 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:30,467 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:26:30,469 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C43823%2C1732148788181.meta, suffix=.meta, logDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181, archiveDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/oldWALs, maxLogs=10 2024-11-21T00:26:30,509 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.meta.1732148790469.meta, exclude list is [], retry=0 2024-11-21T00:26:30,522 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44835,DS-6e5bc986-7d7f-441a-a6dc-2b7c4a3e8340,DISK] 2024-11-21T00:26:30,544 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.meta.1732148790469.meta 2024-11-21T00:26:30,545 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46821:46821)] 2024-11-21T00:26:30,546 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:30,546 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
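The AbstractFSWAL(613) lines above show the WAL settings in effect for this run (blocksize=20 KB, rollsize=10 KB, maxLogs=10, AsyncFSWALProvider). A hedged sketch of the corresponding configuration keys follows; rollsize is derived as blocksize times the log-roll multiplier, and the key names are the usual HBase ones rather than anything printed in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: reproducing the WAL settings reported by AbstractFSWAL(613).
    public class WalTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs");                    // AsyncFSWALProvider
        conf.setLong("hbase.regionserver.hlog.blocksize", 20L * 1024); // blocksize = 20 KB
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // rollsize = 10 KB
        conf.setInt("hbase.regionserver.maxlogs", 10);                 // maxLogs = 10
        return conf;
      }
    }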
2024-11-21T00:26:30,546 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:26:30,546 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:26:30,547 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:26:30,547 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:26:30,547 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:30,547 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:26:30,547 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:26:30,563 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:26:30,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:26:30,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:30,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:30,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:26:30,566 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:26:30,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:30,566 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:30,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:26:30,567 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:26:30,567 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:30,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:30,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:26:30,573 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:26:30,573 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:30,576 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:30,577 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:26:30,579 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740 2024-11-21T00:26:30,580 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740 2024-11-21T00:26:30,581 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:26:30,581 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:26:30,582 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
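FlushLargeStoresPolicy(65) above falls back to the memstore flush size divided by the number of families (128 MB / 4 = 32 MB) because hbase:meta's descriptor does not set hbase.hregion.percolumnfamilyflush.size.lower.bound. For a user table the bound can be pinned in the table descriptor; the sketch below uses the property name from the log itself, while the table and family names are invented for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: setting the per-column-family flush lower bound on a hypothetical
    // table, so FlushLargeStoresPolicy does not fall back to
    // memstoreFlushSize / numberOfFamilies as in the log above.
    public class PerFamilyFlushBound {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024)) // 16 MB per family
            .build();
      }
    }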
2024-11-21T00:26:30,583 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:26:30,584 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73625226, jitterRate=0.0971013605594635}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:26:30,584 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:26:30,584 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148790547Writing region info on filesystem at 1732148790547Initializing all the Stores at 1732148790553 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148790553Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148790562 (+9 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148790562Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148790562Cleaning up temporary data from old regions at 1732148790581 (+19 ms)Running coprocessor post-open hooks at 1732148790584 (+3 ms)Region opened successfully at 1732148790584 2024-11-21T00:26:30,586 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148790433 2024-11-21T00:26:30,589 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:30,590 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:26:30,590 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:26:30,591 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,43823,1732148788181, state=OPEN 2024-11-21T00:26:30,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01745974643/meta-region-server 2024-11-21T00:26:30,832 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:30,833 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01745974643/meta-region-server: CHANGED 2024-11-21T00:26:30,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:26:30,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,43823,1732148788181 in 562 msec 2024-11-21T00:26:30,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01745974643/meta-region-server 2024-11-21T00:26:30,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01745974643/meta-region-server: CHANGED 2024-11-21T00:26:30,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:26:30,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.2420 sec 2024-11-21T00:26:30,840 DEBUG [PEWorker-5 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:30,840 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:26:30,845 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:30,845 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43823,1732148788181, seqNum=-1] 2024-11-21T00:26:30,845 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:30,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-28-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54685, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:26:30,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6760 sec 2024-11-21T00:26:30,854 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148790854, completionTime=-1 
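Once the TransitRegionStateProcedure above completes and hbase:meta is OPEN on 5ed4808ef0e6,43823, clients can resolve the meta location the same way the PEWorker does ("Start fetching meta region location from registry"). A minimal client-side sketch, assuming a reachable cluster configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Sketch: locating hbase:meta from a client, analogous to the
    // "fetched meta region location" lines in the log.
    public class LocateMeta {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml is available
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is served by " + loc.getServerName());
        }
      }
    }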
2024-11-21T00:26:30,854 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:26:30,854 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:26:30,857 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:26:30,857 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148850857 2024-11-21T00:26:30,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148910858 2024-11-21T00:26:30,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-21T00:26:30,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38531,1732148787674-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:30,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38531,1732148787674-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:30,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38531,1732148787674-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:30,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:38531, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:30,858 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:30,865 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:26:30,876 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:30,883 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.494sec 2024-11-21T00:26:30,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:26:30,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:26:30,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:26:30,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
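Several optional master-side features are reported as disabled above (MasterQuotaManager, the hbase:slowlog system table, the WAL event tracker). Where a deployment wants them, they are switched on through configuration before the master starts; the keys in this sketch are the ones those services are expected to read and should be treated as assumptions, not something printed in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch (keys are assumptions): enabling the quota manager and the
    // slow/large request system table that the log reports as disabled.
    public class OptionalMasterFeatures {
      public static Configuration enabled() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true);                         // MasterQuotaManager
        conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true); // hbase:slowlog sink
        return conf;
      }
    }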
2024-11-21T00:26:30,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:26:30,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38531,1732148787674-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:26:30,884 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38531,1732148787674-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:26:30,903 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:26:30,903 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:26:30,903 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38531,1732148787674-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:30,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33e6a8f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:30,968 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38531,-1 for getting cluster id 2024-11-21T00:26:30,969 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:30,981 DEBUG [HMaster-EventLoopGroup-27-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e753136-7b83-4eba-87dd-378bf22a8590' 2024-11-21T00:26:30,984 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:30,984 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e753136-7b83-4eba-87dd-378bf22a8590" 2024-11-21T00:26:30,985 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a38aa0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:30,985 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38531,-1] 2024-11-21T00:26:30,985 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:30,986 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:30,987 INFO [HMaster-EventLoopGroup-27-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56740, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:30,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7167185c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:30,997 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:31,001 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43823,1732148788181, seqNum=-1] 2024-11-21T00:26:31,001 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:31,010 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:26:31,012 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-28-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59774, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:26:31,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:31,018 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:57893 2024-11-21T00:26:31,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:26:31,080 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015ac8e9d30002 connected 2024-11-21T00:26:31,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.log.dir so I do NOT create it in target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f 2024-11-21T00:26:31,106 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.tmp.dir so I do NOT create it in target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f 2024-11-21T00:26:31,107 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.tmp.dir Erasing configuration value by system value. 
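At this point the first mini cluster is up ("Minicluster is up; activeMaster=...") and HBaseTestingUtil begins preparing directories for a second cluster started with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, ...}. A test wanting the same topology would drive it roughly as below; this is a sketch against the HBaseTestingUtil and StartMiniClusterOption classes named in the log, not an excerpt from this test.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    // Sketch: standing up and tearing down a mini cluster with the same
    // options the log prints (1 master, 1 region server, 1 data node).
    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .build();
        util.startMiniCluster(option);
        try {
          // ... run test logic against util.getConnection() / util.getAdmin() ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }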
2024-11-21T00:26:31,107 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/cluster_594054de-0935-d9f2-ef42-a01df030f7f4, deleteOnExit=true 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/test.cache.data in system properties and HBase conf 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:26:31,107 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:26:31,108 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:26:31,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:26:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:26:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:26:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:26:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:26:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:26:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:26:31,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:26:31,583 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:31,649 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:26:31,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:26:31,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:26:31,699 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:26:31,705 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:31,708 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@280b75e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:26:31,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e207ce7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:26:31,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4589e064{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/java.io.tmpdir/jetty-localhost-38577-hadoop-hdfs-3_4_1-tests_jar-_-any-1355296990763363929/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:26:31,892 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@585a476f{HTTP/1.1, (http/1.1)}{localhost:38577} 2024-11-21T00:26:31,892 INFO [Time-limited test {}] server.Server(415): Started @526441ms 2024-11-21T00:26:32,640 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:32,659 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:26:32,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:26:32,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:26:32,708 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:26:32,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@393ac790{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:26:32,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ddcd1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:26:32,907 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54e75703{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/java.io.tmpdir/jetty-localhost-45283-hadoop-hdfs-3_4_1-tests_jar-_-any-15603085716481044555/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:26:32,907 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e28b32{HTTP/1.1, (http/1.1)}{localhost:45283} 2024-11-21T00:26:32,907 INFO [Time-limited test {}] server.Server(415): Started @527457ms 2024-11-21T00:26:32,910 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:26:34,185 WARN [Thread-2347 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/cluster_594054de-0935-d9f2-ef42-a01df030f7f4/data/data1/current/BP-1270601012-172.17.0.2-1732148791146/current, will proceed with Du for space computation calculation, 2024-11-21T00:26:34,191 WARN [Thread-2348 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/cluster_594054de-0935-d9f2-ef42-a01df030f7f4/data/data2/current/BP-1270601012-172.17.0.2-1732148791146/current, will proceed with Du for space computation calculation, 2024-11-21T00:26:34,341 WARN [Thread-2335 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:26:34,349 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd533e30745f9058 with lease ID 0x21c3c8cec1170dfe: Processing first storage report for DS-1d5598d7-0b5a-4c22-b7c8-be9810911139 from datanode DatanodeRegistration(127.0.0.1:44701, datanodeUuid=be319525-6882-46f1-ac97-61595c0575c5, infoPort=42263, infoSecurePort=0, ipcPort=33611, storageInfo=lv=-57;cid=testClusterID;nsid=1986204510;c=1732148791146) 2024-11-21T00:26:34,349 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd533e30745f9058 with lease ID 0x21c3c8cec1170dfe: from storage DS-1d5598d7-0b5a-4c22-b7c8-be9810911139 node DatanodeRegistration(127.0.0.1:44701, datanodeUuid=be319525-6882-46f1-ac97-61595c0575c5, infoPort=42263, infoSecurePort=0, ipcPort=33611, storageInfo=lv=-57;cid=testClusterID;nsid=1986204510;c=1732148791146), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T00:26:34,349 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd533e30745f9058 with lease ID 0x21c3c8cec1170dfe: Processing first storage report for DS-eb275f18-58ad-47d3-abea-f45b02beca50 from datanode DatanodeRegistration(127.0.0.1:44701, datanodeUuid=be319525-6882-46f1-ac97-61595c0575c5, infoPort=42263, infoSecurePort=0, ipcPort=33611, storageInfo=lv=-57;cid=testClusterID;nsid=1986204510;c=1732148791146) 2024-11-21T00:26:34,349 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd533e30745f9058 with lease ID 0x21c3c8cec1170dfe: from storage DS-eb275f18-58ad-47d3-abea-f45b02beca50 node DatanodeRegistration(127.0.0.1:44701, datanodeUuid=be319525-6882-46f1-ac97-61595c0575c5, infoPort=42263, infoSecurePort=0, ipcPort=33611, storageInfo=lv=-57;cid=testClusterID;nsid=1986204510;c=1732148791146), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:26:34,409 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f 2024-11-21T00:26:34,409 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:34,412 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:34,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:26:34,475 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8 with version=8 2024-11-21T00:26:34,475 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/hbase-staging 2024-11-21T00:26:34,477 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:26:34,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:34,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:34,478 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:26:34,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:34,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:26:34,478 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:26:34,478 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:26:34,487 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35843 2024-11-21T00:26:34,488 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35843 connecting to ZooKeeper ensemble=127.0.0.1:57893 2024-11-21T00:26:34,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:358430x0, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:26:34,597 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35843-0x1015ac8e9d30003 connected 2024-11-21T00:26:34,663 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:34,665 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:34,675 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on znode that does not yet exist, /1-991210048/running 2024-11-21T00:26:34,675 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8, hbase.cluster.distributed=false 2024-11-21T00:26:34,677 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on znode that does not yet exist, /1-991210048/acl 2024-11-21T00:26:34,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35843 2024-11-21T00:26:34,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35843 2024-11-21T00:26:34,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35843 2024-11-21T00:26:34,799 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35843 2024-11-21T00:26:34,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35843 2024-11-21T00:26:34,848 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:26:34,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:34,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:34,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:26:34,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:26:34,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:26:34,851 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:26:34,851 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:26:34,876 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to 
/172.17.0.2:38425 2024-11-21T00:26:34,877 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38425 connecting to ZooKeeper ensemble=127.0.0.1:57893 2024-11-21T00:26:34,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:34,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:34,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:384250x0, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:26:34,933 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:384250x0, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on znode that does not yet exist, /1-991210048/running 2024-11-21T00:26:34,934 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:26:34,943 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38425-0x1015ac8e9d30004 connected 2024-11-21T00:26:34,971 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:26:34,973 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on znode that does not yet exist, /1-991210048/master 2024-11-21T00:26:34,975 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on znode that does not yet exist, /1-991210048/acl 2024-11-21T00:26:35,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38425 2024-11-21T00:26:35,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38425 2024-11-21T00:26:35,040 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38425 2024-11-21T00:26:35,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38425 2024-11-21T00:26:35,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38425 2024-11-21T00:26:35,105 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:35843 2024-11-21T00:26:35,112 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-991210048/backup-masters/5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:35,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048/backup-masters 2024-11-21T00:26:35,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, 
quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048/backup-masters 2024-11-21T00:26:35,122 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on existing znode=/1-991210048/backup-masters/5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:35,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:35,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-991210048/master 2024-11-21T00:26:35,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:35,137 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on existing znode=/1-991210048/master 2024-11-21T00:26:35,137 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-991210048/backup-masters/5ed4808ef0e6,35843,1732148794477 from backup master directory 2024-11-21T00:26:35,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048/backup-masters 2024-11-21T00:26:35,147 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
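[annotation] The ZKWatcher/ZKUtil entries above show the master and region server setting watches on znodes such as /1-991210048/running and /1-991210048/master before those nodes exist, then reacting to the NodeCreated and NodeChildrenChanged events once they appear. A minimal sketch of that pattern with the plain ZooKeeper client API is shown below; the quorum string, base znode and timeout are copied from this log for illustration only, and the class itself is not part of HBase.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum and base znode mirror the values in this log; adjust for a real cluster.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57893", 30_000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        connected.await();

        String running = "/1-991210048/running";
        // exists() registers a watch even when the znode is absent, which is the
        // "Set watcher on znode that does not yet exist" pattern logged above.
        zk.exists(running, event -> {
            if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                System.out.println("cluster marked as running: " + event.getPath());
            }
        });

        Thread.sleep(60_000);   // keep the session alive long enough to observe the event
        zk.close();
    }
}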
2024-11-21T00:26:35,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-991210048/backup-masters/5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:35,147 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:35,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048/backup-masters 2024-11-21T00:26:35,158 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/hbase.id] with ID: d093c870-e608-410e-9fad-5234a879f7a4 2024-11-21T00:26:35,158 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/.tmp/hbase.id 2024-11-21T00:26:35,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:26:35,235 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/.tmp/hbase.id]:[hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/hbase.id] 2024-11-21T00:26:35,247 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:26:35,247 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:26:35,248 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
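[annotation] The FSUtils entries above first write hbase.id under a .tmp directory and then move it to its final location, so readers never observe a partially written cluster ID file. A rough equivalent of that write-then-rename step with the Hadoop FileSystem API might look like the sketch below; the root directory and the generated UUID are illustrative, not the ones used by this test.

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:41951");   // NameNode address seen in this log
        FileSystem fs = FileSystem.get(conf);

        Path rootDir = new Path("/user/jenkins/test-data/example");  // illustrative root dir
        Path tmpId = new Path(rootDir, ".tmp/hbase.id");
        Path finalId = new Path(rootDir, "hbase.id");

        // Write the id to a temporary file first...
        try (FSDataOutputStream out = fs.create(tmpId, true)) {
            out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // ...then move it to its target location in one step, as FSUtils does above.
        if (!fs.rename(tmpId, finalId)) {
            throw new IllegalStateException("could not move " + tmpId + " to " + finalId);
        }
        System.out.println("cluster id file at " + finalId);
    }
}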
2024-11-21T00:26:35,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:35,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:35,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:26:35,314 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:26:35,316 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:26:35,324 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:35,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:26:35,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:35,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:35,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:35,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:35,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:35,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:35,662 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:35,815 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store 2024-11-21T00:26:35,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:26:35,865 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:35,865 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:26:35,866 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:35,866 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
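[annotation] The 'master:store' descriptor logged above (families info, proc, rs and state, with VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE settings) is built internally by the master, but the same kind of descriptor can be expressed with the public builder API. The sketch below is only an approximation for an ordinary user table: the name "example:store" and the reduced set of families are made up for illustration, since master:store itself is a private, master-local region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
    public static TableDescriptor build() {
        // Mirrors the 'info' family settings from the log: 3 versions, in-memory,
        // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        // Mirrors the 'proc' family: single version, ROW bloom filter, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        // "example:store" is a placeholder table name, not the internal master:store region.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
    }
}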
2024-11-21T00:26:35,866 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:26:35,866 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:35,866 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:35,866 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148795865Disabling compacts and flushes for region at 1732148795865Disabling writes for close at 1732148795866 (+1 ms)Writing region close event to WAL at 1732148795866Closed at 1732148795866 2024-11-21T00:26:35,868 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/.initializing 2024-11-21T00:26:35,868 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/WALs/5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:35,870 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:26:35,871 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C35843%2C1732148794477, suffix=, logDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/WALs/5ed4808ef0e6,35843,1732148794477, archiveDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/oldWALs, maxLogs=10 2024-11-21T00:26:35,894 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/WALs/5ed4808ef0e6,35843,1732148794477/5ed4808ef0e6%2C35843%2C1732148794477.1732148795872, exclude list is [], retry=0 2024-11-21T00:26:35,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44701,DS-1d5598d7-0b5a-4c22-b7c8-be9810911139,DISK] 2024-11-21T00:26:35,935 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/WALs/5ed4808ef0e6,35843,1732148794477/5ed4808ef0e6%2C35843%2C1732148794477.1732148795872 2024-11-21T00:26:35,936 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42263:42263)] 2024-11-21T00:26:35,936 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:35,937 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated 
master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:35,937 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:35,937 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:35,959 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:35,965 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:26:35,965 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:35,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:35,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:35,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:26:35,971 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:35,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:35,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:35,976 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:26:35,976 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:35,983 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:35,983 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:36,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:26:36,002 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:36,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:36,003 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:36,013 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:36,013 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:36,015 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:36,015 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:36,016 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:26:36,017 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:26:36,030 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:36,030 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59374921, jitterRate=-0.11524473130702972}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:26:36,031 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148795937Initializing all the Stores at 1732148795943 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148795943Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148795959 (+16 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148795959Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148795959Cleaning up temporary data from old regions at 1732148796015 (+56 ms)Region opened successfully at 1732148796031 (+16 ms) 2024-11-21T00:26:36,038 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:26:36,043 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d840e9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:26:36,044 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:26:36,044 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:26:36,045 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:26:36,045 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:26:36,046 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:26:36,046 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:26:36,046 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:26:36,074 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
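[annotation] The procedure subsystem above describes its worker pools in terms of core threads, an optional burst ceiling, core-thread timeout and a bounded queue ("Starting 5 core workers ... with max (burst) worker count=50", "coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32"). With plain java.util.concurrent that shape of pool can be sketched roughly as follows; the numbers are copied from the log, the class itself is not part of HBase, and a standard ThreadPoolExecutor only grows past the core size once its queue is full, so this is a loose analogy rather than the real implementation.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class DispatcherPoolSketch {
    public static ThreadPoolExecutor newPool() {
        // 3 core threads that may time out, a burst ceiling of 50, and a bounded queue of 32 slots.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3,                                  // core threads
            50,                                 // maximum (burst) threads
            60, TimeUnit.SECONDS,               // idle timeout before extra threads are reclaimed
            new LinkedBlockingQueue<>(32));     // bounded work queue
        pool.allowCoreThreadTimeOut(true);      // matches allowCoreThreadTimeOut=true above
        return pool;
    }

    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = newPool();
        for (int i = 0; i < 10; i++) {
            final int task = i;
            pool.execute(() -> System.out.println("task " + task + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}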
2024-11-21T00:26:36,075 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Unable to get data of znode /1-991210048/balancer because node does not exist (not necessarily an error) 2024-11-21T00:26:36,147 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-991210048/balancer already deleted, retry=false 2024-11-21T00:26:36,147 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:26:36,148 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Unable to get data of znode /1-991210048/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:26:36,177 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:26:36,230 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-991210048/normalizer already deleted, retry=false 2024-11-21T00:26:36,230 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:26:36,244 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Unable to get data of znode /1-991210048/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:26:36,292 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-991210048/switch/split already deleted, retry=false 2024-11-21T00:26:36,293 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Unable to get data of znode /1-991210048/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:26:36,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-991210048/switch/merge already deleted, retry=false 2024-11-21T00:26:36,317 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Unable to get data of znode /1-991210048/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:26:36,325 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-991210048/snapshot-cleanup already deleted, retry=false 2024-11-21T00:26:36,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-991210048/running 2024-11-21T00:26:36,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:36,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, 
type=NodeCreated, state=SyncConnected, path=/1-991210048/running 2024-11-21T00:26:36,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:36,337 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,35843,1732148794477, sessionid=0x1015ac8e9d30003, setting cluster-up flag (Was=false) 2024-11-21T00:26:36,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:36,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:36,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:36,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:36,359 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:36,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:36,362 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:36,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:36,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:36,389 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-991210048/flush-table-proc/acquired, /1-991210048/flush-table-proc/reached, /1-991210048/flush-table-proc/abort 2024-11-21T00:26:36,390 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:36,392 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:26:36,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:36,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:36,452 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-991210048/online-snapshot/acquired, /1-991210048/online-snapshot/reached, /1-991210048/online-snapshot/abort 2024-11-21T00:26:36,453 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:36,466 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:26:36,478 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:36,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:26:36,479 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
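[annotation] The StochasticLoadBalancer entry above reports the knobs it loaded (maxSteps, runMaxSteps, stepsPerRegion, maxRunningTime, isByTable). In a test those values are normally tuned through the Configuration before the mini cluster starts. The sketch below assumes the conventional hbase.master.balancer.stochastic.* property names for those knobs; they should be double-checked against the HBase release being tested.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerConfSketch {
    public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names for the values reported in the log; verify against
        // the StochasticLoadBalancer documentation of the HBase version in use.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        conf.setBoolean("hbase.master.loadbalance.bytable", false);   // isByTable=false in the log
        return conf;
    }
}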
2024-11-21T00:26:36,479 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,35843,1732148794477 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:26:36,481 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(746): ClusterId : d093c870-e608-410e-9fad-5234a879f7a4 2024-11-21T00:26:36,481 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:26:36,496 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,504 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:26:36,504 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:26:36,517 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:26:36,518 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4af9d575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:26:36,523 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:36,523 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 
2024-11-21T00:26:36,526 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:36,526 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:26:36,561 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:38425 2024-11-21T00:26:36,561 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:26:36,561 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:26:36,561 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(832): About to register with Master. 
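[annotation] The hbase:meta descriptor created above carries a coprocessor attribute (MultiRowMutationEndpoint at priority 536870911), and the region server confirms that system and table coprocessor loading is enabled. Attaching a coprocessor to an ordinary table uses the same mechanism through the public builder; in the sketch below the table name "demo" is hypothetical and the endpoint class simply reuses the one named in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorDescriptorSketch {
    public static TableDescriptor build() throws Exception {
        // "demo" is a placeholder table; the endpoint class is the one logged for hbase:meta.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
    }
}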
2024-11-21T00:26:36,564 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,35843,1732148794477 with port=38425, startcode=1732148794847 2024-11-21T00:26:36,564 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:26:36,575 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148826574 2024-11-21T00:26:36,575 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:26:36,575 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:26:36,575 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:26:36,575 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:26:36,575 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:26:36,575 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:26:36,588 INFO [HMaster-EventLoopGroup-29-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41885, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.13 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:26:36,589 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35843 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:26:36,590 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-21T00:26:36,590 WARN [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-21T00:26:36,606 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
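In the stretch above, the region server's first reportForDuty is rejected with ServerNotRunningYetException and it logs "reportForDuty failed; sleeping 100 ms and then retrying." As a hedged, plain-JDK sketch of that retry-until-ready pattern (not HRegionServer's actual code path, and the exception type here is a stand-in):

```java
import java.util.concurrent.Callable;

public class RetrySketch {
  // Hypothetical marker for "master not up yet"; HBase signals this with ServerNotRunningYetException.
  static class NotRunningYetException extends Exception {}

  static <T> T retryUntilReady(Callable<T> call, long sleepMs) throws Exception {
    while (true) {
      try {
        return call.call();
      } catch (NotRunningYetException e) {
        // Mirrors "sleeping 100 ms and then retrying".
        Thread.sleep(sleepMs);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    final int[] attempts = {0};
    String result = retryUntilReady(() -> {
      if (attempts[0]++ < 2) {
        throw new NotRunningYetException();
      }
      return "registered";
    }, 100L);
    System.out.println(result + " after " + attempts[0] + " attempts");
  }
}
```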
2024-11-21T00:26:36,614 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:26:36,614 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:26:36,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:26:36,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:26:36,635 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:26:36,635 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:26:36,636 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148796635,5,FailOnTimeoutGroup] 2024-11-21T00:26:36,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:26:36,650 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148796636,5,FailOnTimeoutGroup] 2024-11-21T00:26:36,651 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,651 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:26:36,651 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,651 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
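The cleaner chores above (HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms) are periodic tasks run by HBase's ChoreService. A hedged JDK analogy of scheduling such a cleanup pass at a fixed period, not the ChoreService implementation itself:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);

    // Roughly like "ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS":
    // run the cleanup body every 600000 ms.
    chorePool.scheduleAtFixedRate(
        () -> System.out.println("pretend hfile cleanup pass"),
        0L, 600_000L, TimeUnit.MILLISECONDS);

    // A real server runs this until shutdown; here we stop quickly so the sketch terminates.
    Thread.sleep(100L);
    chorePool.shutdownNow();
  }
}
```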
2024-11-21T00:26:36,691 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,35843,1732148794477 with port=38425, startcode=1732148794847 2024-11-21T00:26:36,696 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35843 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:36,696 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35843 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:36,703 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8 2024-11-21T00:26:36,703 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41951 2024-11-21T00:26:36,703 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:26:36,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048/rs 2024-11-21T00:26:36,749 DEBUG [RS:0;5ed4808ef0e6:38425 {}] zookeeper.ZKUtil(111): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on existing znode=/1-991210048/rs/5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:36,749 WARN [RS:0;5ed4808ef0e6:38425 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:26:36,749 INFO [RS:0;5ed4808ef0e6:38425 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:36,749 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:36,754 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,38425,1732148794847] 2024-11-21T00:26:36,797 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:26:36,808 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:26:36,812 INFO [RS:0;5ed4808ef0e6:38425 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:26:36,812 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
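Above, the region server echoes configuration received from the master (hbase.rootdir, fs.defaultFS) and then instantiates an AsyncFSWALProvider WAL. A hedged sketch of setting the corresponding keys in code: the rootdir and defaultFS values are copied from the log, while using "hbase.wal.provider" with the value "asyncfs" to select AsyncFSWALProvider is my assumption rather than something the log states.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Values echoed in the log as "Config from master":
    conf.set("hbase.rootdir",
        "hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8");
    conf.set("fs.defaultFS", "hdfs://localhost:41951");

    // Assumption: this key/value pair selects the AsyncFSWALProvider seen in the log.
    conf.set("hbase.wal.provider", "asyncfs");

    System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
  }
}
```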
2024-11-21T00:26:36,813 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:26:36,814 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:26:36,814 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,814 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,814 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:26:36,815 DEBUG [RS:0;5ed4808ef0e6:38425 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:26:36,829 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T00:26:36,829 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,829 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,829 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,829 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,829 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38425,1732148794847-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:26:36,864 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:26:36,864 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38425,1732148794847-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,864 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,864 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.Replication(171): 5ed4808ef0e6,38425,1732148794847 started 2024-11-21T00:26:36,886 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:36,886 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,38425,1732148794847, RpcServer on 5ed4808ef0e6/172.17.0.2:38425, sessionid=0x1015ac8e9d30004 2024-11-21T00:26:36,886 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:26:36,886 DEBUG [RS:0;5ed4808ef0e6:38425 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:36,886 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,38425,1732148794847' 2024-11-21T00:26:36,886 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-991210048/flush-table-proc/abort' 2024-11-21T00:26:36,887 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-991210048/flush-table-proc/acquired' 2024-11-21T00:26:36,888 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:26:36,888 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:26:36,888 DEBUG [RS:0;5ed4808ef0e6:38425 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:36,888 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,38425,1732148794847' 2024-11-21T00:26:36,888 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-991210048/online-snapshot/abort' 
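The procedure members above check '/1-991210048/flush-table-proc/abort' for aborted procedures and watch '/1-991210048/flush-table-proc/acquired' for new ones. A hedged sketch of that list-and-watch pattern with the plain ZooKeeper client follows; the quorum string and znode path are copied from the log, but the watcher body is illustrative and not HBase's ZKProcedureMemberRpcs.

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log; a ZooKeeper must actually be listening there for this to run.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57893", 30_000, (WatchedEvent e) -> {});

    String acquired = "/1-991210048/flush-table-proc/acquired";
    Watcher onChildren = event -> System.out.println("children changed under " + event.getPath());

    // List current procedure znodes and register interest in future changes,
    // roughly what "Looking for new procedures under znode:..." corresponds to.
    List<String> procedures = zk.getChildren(acquired, onChildren);
    System.out.println("outstanding procedures: " + procedures);

    zk.close();
  }
}
```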
2024-11-21T00:26:36,888 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-991210048/online-snapshot/acquired' 2024-11-21T00:26:36,888 DEBUG [RS:0;5ed4808ef0e6:38425 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:26:36,888 INFO [RS:0;5ed4808ef0e6:38425 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:26:36,890 INFO [RS:0;5ed4808ef0e6:38425 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:26:36,990 INFO [RS:0;5ed4808ef0e6:38425 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:26:36,994 INFO [RS:0;5ed4808ef0e6:38425 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C38425%2C1732148794847, suffix=, logDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847, archiveDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/oldWALs, maxLogs=10 2024-11-21T00:26:37,024 DEBUG [RS:0;5ed4808ef0e6:38425 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, exclude list is [], retry=0 2024-11-21T00:26:37,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44701,DS-1d5598d7-0b5a-4c22-b7c8-be9810911139,DISK] 2024-11-21T00:26:37,041 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:26:37,042 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8 2024-11-21T00:26:37,081 INFO [RS:0;5ed4808ef0e6:38425 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 2024-11-21T00:26:37,124 DEBUG [RS:0;5ed4808ef0e6:38425 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42263:42263)] 2024-11-21T00:26:37,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:26:37,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:37,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:26:37,144 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:26:37,144 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,145 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:26:37,155 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:26:37,155 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,156 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:26:37,157 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:26:37,157 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,158 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:26:37,159 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:26:37,160 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,161 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:26:37,161 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740 2024-11-21T00:26:37,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740 2024-11-21T00:26:37,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:26:37,163 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:26:37,163 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:26:37,165 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:26:37,169 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:37,169 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65364197, jitterRate=-0.02599756419658661}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:26:37,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148797139Initializing all the Stores at 1732148797140 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148797140Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148797140Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148797140Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148797140Cleaning up temporary data from old regions at 1732148797163 (+23 ms)Region opened successfully at 1732148797169 (+6 ms) 2024-11-21T00:26:37,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:26:37,170 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:26:37,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:26:37,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:26:37,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:26:37,170 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:26:37,170 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148797170Disabling compacts and flushes for region at 1732148797170Disabling writes for close at 1732148797170Writing region close event to WAL at 1732148797170Closed at 1732148797170 2024-11-21T00:26:37,172 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:37,172 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:26:37,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:26:37,174 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:26:37,176 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:26:37,326 DEBUG [5ed4808ef0e6:35843 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:26:37,327 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:37,329 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,38425,1732148794847, state=OPENING 2024-11-21T00:26:37,346 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:26:37,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:37,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:37,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-991210048/meta-region-server: CHANGED 2024-11-21T00:26:37,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-991210048/meta-region-server: CHANGED 2024-11-21T00:26:37,414 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:26:37,414 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,38425,1732148794847}] 2024-11-21T00:26:37,588 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:26:37,612 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49627, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:26:37,632 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:26:37,632 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:37,632 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:26:37,641 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C38425%2C1732148794847.meta, suffix=.meta, logDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847, archiveDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/oldWALs, maxLogs=10 2024-11-21T00:26:37,680 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.meta.1732148797641.meta, exclude list is [], retry=0 2024-11-21T00:26:37,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44701,DS-1d5598d7-0b5a-4c22-b7c8-be9810911139,DISK] 2024-11-21T00:26:37,719 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.meta.1732148797641.meta 2024-11-21T00:26:37,733 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new 
AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42263:42263)] 2024-11-21T00:26:37,733 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:37,733 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:26:37,733 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:26:37,734 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:26:37,734 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:26:37,734 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:26:37,734 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:37,734 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:26:37,734 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:26:37,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:26:37,755 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:26:37,756 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,756 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:26:37,758 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:26:37,758 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:26:37,773 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:26:37,773 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:26:37,774 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:26:37,774 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:26:37,775 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:26:37,776 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740 2024-11-21T00:26:37,777 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740 2024-11-21T00:26:37,778 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:26:37,778 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:26:37,779 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
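Both meta opens log that hbase.hregion.percolumnfamilyflush.size.lower.bound is unset, so FlushLargeStoresPolicy falls back to the region memstore flush size divided by the number of column families, which for hbase:meta's four families (info, ns, rep_barrier, table) gives the 32 MB reported as FlushLargeStoresPolicy{flushSizeLowerBound=33554432}. A small sketch of that arithmetic, assuming the stock 128 MB (134217728-byte) hbase.hregion.memstore.flush.size default:

```java
public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Assumption: default hbase.hregion.memstore.flush.size of 128 MB.
    long memstoreFlushSize = 134_217_728L;
    int familyCount = 4; // info, ns, rep_barrier, table in hbase:meta

    // Fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set:
    // the flush size divided by the number of column families.
    long flushSizeLowerBound = memstoreFlushSize / familyCount;

    // Matches the logged FlushLargeStoresPolicy{flushSizeLowerBound=33554432}, i.e. 32 MB.
    System.out.println(flushSizeLowerBound);
  }
}
```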
2024-11-21T00:26:37,780 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:26:37,784 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71930779, jitterRate=0.07185213267803192}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:26:37,785 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:26:37,785 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148797734Writing region info on filesystem at 1732148797734Initializing all the Stores at 1732148797736 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148797736Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148797736Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148797736Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148797736Cleaning up temporary data from old regions at 1732148797778 (+42 ms)Running coprocessor post-open hooks at 1732148797785 (+7 ms)Region opened successfully at 1732148797785 2024-11-21T00:26:37,786 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148797588 2024-11-21T00:26:37,789 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:26:37,789 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:26:37,794 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:37,794 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,38425,1732148794847, state=OPEN 2024-11-21T00:26:37,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-991210048/meta-region-server 2024-11-21T00:26:37,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-991210048/meta-region-server 2024-11-21T00:26:37,859 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-991210048/meta-region-server: CHANGED 2024-11-21T00:26:37,859 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:37,859 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-991210048/meta-region-server: CHANGED 2024-11-21T00:26:37,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:26:37,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,38425,1732148794847 in 445 msec 2024-11-21T00:26:37,865 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:26:37,865 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 690 msec 2024-11-21T00:26:37,866 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:26:37,866 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:26:37,868 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:37,868 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38425,1732148794847, seqNum=-1] 2024-11-21T00:26:37,868 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:37,869 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56207, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:26:37,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4150 sec 2024-11-21T00:26:37,890 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148797890, completionTime=-1 2024-11-21T00:26:37,890 
INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:26:37,890 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:26:37,894 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:26:37,894 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148857894 2024-11-21T00:26:37,894 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148917894 2024-11-21T00:26:37,894 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 4 msec 2024-11-21T00:26:37,895 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35843,1732148794477-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:37,895 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35843,1732148794477-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:37,895 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35843,1732148794477-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:37,895 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:35843, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:37,895 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:37,898 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:26:37,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.757sec 2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35843,1732148794477-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:26:37,904 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35843,1732148794477-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:26:37,912 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72fd9f45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:37,913 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35843,-1 for getting cluster id 2024-11-21T00:26:37,913 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:37,924 DEBUG [HMaster-EventLoopGroup-29-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd093c870-e608-410e-9fad-5234a879f7a4' 2024-11-21T00:26:37,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:37,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d093c870-e608-410e-9fad-5234a879f7a4" 2024-11-21T00:26:37,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8009fc8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:37,925 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35843,-1] 2024-11-21T00:26:37,926 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:37,926 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:37,926 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:26:37,926 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:26:37,927 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35843,1732148794477-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T00:26:37,927 INFO [HMaster-EventLoopGroup-29-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39290, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:37,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11005890, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:37,929 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:37,930 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38425,1732148794847, seqNum=-1] 2024-11-21T00:26:37,931 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:37,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:26:37,936 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:37,937 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:57893 2024-11-21T00:26:37,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:26:37,971 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:37,972 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:37,972 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@8c85ef1 2024-11-21T00:26:37,972 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:37,973 INFO [HMaster-EventLoopGroup-27-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59372, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:26:37,974 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:26:37,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:26:37,977 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:26:37,977 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:37,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:26:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:37,980 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015ac8e9d30005 connected 2024-11-21T00:26:37,982 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:26:38,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:26:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:38,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:38,427 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a6e31f0ed205e4fb314ad9036d0360ce, NAME => 'test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4 
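Editor's note: the create 'test' request captured above (families f and f1 with REPLICATION_SCOPE => '1', norep with scope 0) corresponds to an HBase client Admin call. The following is a minimal sketch only, assuming an already-open Connection named conn; the class and method names are standard HBase client API, but the surrounding wiring is illustrative and not taken from this test run.

```java
// Sketch only: builds a 'test' table descriptor like the one logged above and creates it.
// "conn" is an assumed, already-open org.apache.hadoop.hbase.client.Connection.
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTestTableSketch {
  static void createTestTable(Connection conn) throws Exception {
    TableDescriptorBuilder test = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"));
    // 'f' and 'f1' are replicated to peers (REPLICATION_SCOPE => '1'); 'norep' is not (scope 0).
    for (String family : new String[] {"f", "f1"}) {
      test.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)
          .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
          .build());
    }
    test.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
        .setMaxVersions(1)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        .build());
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(test.build()); // drives the CreateTableProcedure seen in the log
    }
  }
}
```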
2024-11-21T00:26:38,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:26:38,511 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:38,511 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing a6e31f0ed205e4fb314ad9036d0360ce, disabling compactions & flushes 2024-11-21T00:26:38,511 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:38,511 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:38,511 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. after waiting 0 ms 2024-11-21T00:26:38,511 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:38,511 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:38,511 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for a6e31f0ed205e4fb314ad9036d0360ce: Waiting for close lock at 1732148798511Disabling compacts and flushes for region at 1732148798511Disabling writes for close at 1732148798511Writing region close event to WAL at 1732148798511Closed at 1732148798511 2024-11-21T00:26:38,513 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:26:38,513 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148798513"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148798513"}]},"ts":"1732148798513"} 2024-11-21T00:26:38,519 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
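Editor's note: the MetaTableAccessor Put entries above are writes into the hbase:meta catalog (info:regioninfo and info:state for the new region, plus a table-state row). A hedged sketch of how those rows could be inspected from a client follows, assuming a Connection named conn; the helper name dumpMetaRowsForTable is made up for illustration.

```java
// Sketch only: scans hbase:meta for the region rows that CreateTableProcedure just wrote
// (the info:regioninfo / info:state columns seen in the Put above). "conn" is an assumed
// Connection; dumpMetaRowsForTable is a hypothetical helper name.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetaScanSketch {
  static void dumpMetaRowsForTable(Connection conn, String tableName) throws Exception {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(
             new Scan().withStartRow(Bytes.toBytes(tableName + ",")))) {
      for (Result r : scanner) {
        String row = Bytes.toStringBinary(r.getRow());
        if (!row.startsWith(tableName + ",")) {
          break; // past this table's region rows
        }
        byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
        System.out.println(row + "  info:state=" + (state == null ? "-" : Bytes.toString(state)));
      }
    }
  }
}
```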
2024-11-21T00:26:38,521 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:26:38,521 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148798521"}]},"ts":"1732148798521"} 2024-11-21T00:26:38,524 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:26:38,524 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=a6e31f0ed205e4fb314ad9036d0360ce, ASSIGN}] 2024-11-21T00:26:38,526 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=a6e31f0ed205e4fb314ad9036d0360ce, ASSIGN 2024-11-21T00:26:38,533 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=a6e31f0ed205e4fb314ad9036d0360ce, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,43823,1732148788181; forceNewPlan=false, retain=false 2024-11-21T00:26:38,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:38,689 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a6e31f0ed205e4fb314ad9036d0360ce, regionState=OPENING, regionLocation=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:38,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=a6e31f0ed205e4fb314ad9036d0360ce, ASSIGN because future has completed 2024-11-21T00:26:38,698 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a6e31f0ed205e4fb314ad9036d0360ce, server=5ed4808ef0e6,43823,1732148788181}] 2024-11-21T00:26:38,912 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:38,912 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a6e31f0ed205e4fb314ad9036d0360ce, NAME => 'test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:38,913 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:26:38,913 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
2024-11-21T00:26:38,913 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,913 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:38,913 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,913 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,925 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,933 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6e31f0ed205e4fb314ad9036d0360ce columnFamilyName f 2024-11-21T00:26:38,933 DEBUG [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:38,934 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] regionserver.HStore(327): Store=a6e31f0ed205e4fb314ad9036d0360ce/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:38,934 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,937 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6e31f0ed205e4fb314ad9036d0360ce columnFamilyName f1 2024-11-21T00:26:38,938 DEBUG [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:38,938 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] regionserver.HStore(327): Store=a6e31f0ed205e4fb314ad9036d0360ce/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:38,938 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,941 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a6e31f0ed205e4fb314ad9036d0360ce columnFamilyName norep 2024-11-21T00:26:38,942 DEBUG [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:38,942 INFO [StoreOpener-a6e31f0ed205e4fb314ad9036d0360ce-1 {}] regionserver.HStore(327): Store=a6e31f0ed205e4fb314ad9036d0360ce/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:38,942 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,943 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,943 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,945 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,945 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,946 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:26:38,947 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,965 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:38,966 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a6e31f0ed205e4fb314ad9036d0360ce; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75166965, jitterRate=0.12007506191730499}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:26:38,966 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:38,966 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a6e31f0ed205e4fb314ad9036d0360ce: Running coprocessor pre-open hook at 1732148798913Writing region info on filesystem at 1732148798913Initializing all the Stores at 1732148798916 (+3 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148798916Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148798925 (+9 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148798925Cleaning up temporary data from old regions at 1732148798945 (+20 ms)Running coprocessor post-open hooks at 1732148798966 (+21 ms)Region opened successfully at 1732148798966 2024-11-21T00:26:38,973 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce., pid=6, 
masterSystemTime=1732148798880 2024-11-21T00:26:38,981 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:38,981 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:38,981 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a6e31f0ed205e4fb314ad9036d0360ce, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:38,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a6e31f0ed205e4fb314ad9036d0360ce, server=5ed4808ef0e6,43823,1732148788181 because future has completed 2024-11-21T00:26:38,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:26:38,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a6e31f0ed205e4fb314ad9036d0360ce, server=5ed4808ef0e6,43823,1732148788181 in 295 msec 2024-11-21T00:26:38,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:26:38,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=a6e31f0ed205e4fb314ad9036d0360ce, ASSIGN in 471 msec 2024-11-21T00:26:38,998 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:26:38,998 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148798998"}]},"ts":"1732148798998"} 2024-11-21T00:26:39,002 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:26:39,003 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:26:39,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 1.0290 sec 2024-11-21T00:26:39,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:39,119 INFO [RPCClient-NioEventLoopGroup-4-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:26:39,119 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:39,120 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:39,120 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@310e6645 2024-11-21T00:26:39,120 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:39,121 INFO [HMaster-EventLoopGroup-29-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:26:39,122 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:26:39,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:26:39,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:26:39,125 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:39,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:26:39,126 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:26:39,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:39,129 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:26:39,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:26:39,209 INFO 
[RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d7dc5d9799b9dbfd0669e5e4e687e303, NAME => 'test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8 2024-11-21T00:26:39,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:26:39,227 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:39,227 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing d7dc5d9799b9dbfd0669e5e4e687e303, disabling compactions & flushes 2024-11-21T00:26:39,227 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:39,227 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:39,227 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. after waiting 0 ms 2024-11-21T00:26:39,227 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:39,227 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 
2024-11-21T00:26:39,227 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for d7dc5d9799b9dbfd0669e5e4e687e303: Waiting for close lock at 1732148799227Disabling compacts and flushes for region at 1732148799227Disabling writes for close at 1732148799227Writing region close event to WAL at 1732148799227Closed at 1732148799227 2024-11-21T00:26:39,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:26:39,231 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148799231"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148799231"}]},"ts":"1732148799231"} 2024-11-21T00:26:39,233 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:26:39,235 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:26:39,235 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148799235"}]},"ts":"1732148799235"} 2024-11-21T00:26:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:39,238 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:26:39,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=d7dc5d9799b9dbfd0669e5e4e687e303, ASSIGN}] 2024-11-21T00:26:39,239 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=d7dc5d9799b9dbfd0669e5e4e687e303, ASSIGN 2024-11-21T00:26:39,242 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=d7dc5d9799b9dbfd0669e5e4e687e303, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,38425,1732148794847; forceNewPlan=false, retain=false 2024-11-21T00:26:39,393 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d7dc5d9799b9dbfd0669e5e4e687e303, regionState=OPENING, regionLocation=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:39,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=d7dc5d9799b9dbfd0669e5e4e687e303, ASSIGN because future has completed 2024-11-21T00:26:39,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7dc5d9799b9dbfd0669e5e4e687e303, server=5ed4808ef0e6,38425,1732148794847}] 2024-11-21T00:26:39,448 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:39,585 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:39,585 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => d7dc5d9799b9dbfd0669e5e4e687e303, NAME => 'test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:39,585 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:26:39,586 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 2024-11-21T00:26:39,586 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,586 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:39,586 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,586 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,597 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,608 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d7dc5d9799b9dbfd0669e5e4e687e303 columnFamilyName f 2024-11-21T00:26:39,608 DEBUG [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-21T00:26:39,608 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] regionserver.HStore(327): Store=d7dc5d9799b9dbfd0669e5e4e687e303/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:39,608 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,610 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d7dc5d9799b9dbfd0669e5e4e687e303 columnFamilyName f1 2024-11-21T00:26:39,610 DEBUG [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:39,610 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] regionserver.HStore(327): Store=d7dc5d9799b9dbfd0669e5e4e687e303/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:39,610 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,611 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d7dc5d9799b9dbfd0669e5e4e687e303 columnFamilyName norep 2024-11-21T00:26:39,611 DEBUG [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:39,612 INFO [StoreOpener-d7dc5d9799b9dbfd0669e5e4e687e303-1 {}] regionserver.HStore(327): Store=d7dc5d9799b9dbfd0669e5e4e687e303/norep, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:39,612 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,612 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,613 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,614 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,614 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,614 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:26:39,621 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,624 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:39,625 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened d7dc5d9799b9dbfd0669e5e4e687e303; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61445711, jitterRate=-0.08438755571842194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:26:39,625 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:39,625 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for d7dc5d9799b9dbfd0669e5e4e687e303: Running coprocessor pre-open hook at 1732148799586Writing region info on filesystem at 1732148799586Initializing all the Stores at 1732148799587 (+1 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 
1732148799587Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148799597 (+10 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148799597Cleaning up temporary data from old regions at 1732148799614 (+17 ms)Running coprocessor post-open hooks at 1732148799625 (+11 ms)Region opened successfully at 1732148799625 2024-11-21T00:26:39,626 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303., pid=6, masterSystemTime=1732148799566 2024-11-21T00:26:39,629 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:39,629 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:39,630 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=d7dc5d9799b9dbfd0669e5e4e687e303, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:39,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure d7dc5d9799b9dbfd0669e5e4e687e303, server=5ed4808ef0e6,38425,1732148794847 because future has completed 2024-11-21T00:26:39,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:26:39,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure d7dc5d9799b9dbfd0669e5e4e687e303, server=5ed4808ef0e6,38425,1732148794847 in 230 msec 2024-11-21T00:26:39,639 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:26:39,639 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=d7dc5d9799b9dbfd0669e5e4e687e303, ASSIGN in 398 msec 2024-11-21T00:26:39,640 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:26:39,640 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148799640"}]},"ts":"1732148799640"} 2024-11-21T00:26:39,642 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:26:39,644 INFO 
[PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:26:39,646 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 522 msec 2024-11-21T00:26:39,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:26:39,768 INFO [RPCClient-NioEventLoopGroup-4-10 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:26:39,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67408adf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,776 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38531,-1 for getting cluster id 2024-11-21T00:26:39,776 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:39,777 DEBUG [HMaster-EventLoopGroup-27-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e753136-7b83-4eba-87dd-378bf22a8590' 2024-11-21T00:26:39,777 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:39,778 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e753136-7b83-4eba-87dd-378bf22a8590" 2024-11-21T00:26:39,778 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c60ef99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,778 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38531,-1] 2024-11-21T00:26:39,778 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:39,778 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:39,780 INFO [HMaster-EventLoopGroup-27-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59398, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:39,780 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e1e4f4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fd4376, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
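Editor's note: the ClusterIdFetcher / ConnectionRegistryService exchanges above are what client connection setup produces. The sketch below shows one way to obtain such a Connection; the quorum and base-znode values mirror ones seen earlier in this log, but using a ZooKeeper-based registry here is an assumption for illustration, since the clients in this log go through the masters' RPC connection registry.

```java
// Sketch only: obtains a client Connection roughly like the "Time-limited test" clients above.
// Configuration values are assumptions that mirror the test's ZooKeeper quorum and base znode.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class ClientConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:57893"); // assumed: mirrors the test quorum
    conf.set("zookeeper.znode.parent", "/1-991210048");    // assumed: mirrors the test base znode
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("tables visible: " + admin.listTableNames().length);
    }
  }
}
```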
2024-11-21T00:26:39,793 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35843,-1 for getting cluster id 2024-11-21T00:26:39,793 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:39,794 DEBUG [HMaster-EventLoopGroup-29-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd093c870-e608-410e-9fad-5234a879f7a4' 2024-11-21T00:26:39,795 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:39,795 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d093c870-e608-410e-9fad-5234a879f7a4" 2024-11-21T00:26:39,795 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37fdb844, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,795 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35843,-1] 2024-11-21T00:26:39,795 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:39,796 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:39,798 INFO [HMaster-EventLoopGroup-29-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39314, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:39,799 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@613de0cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,814 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1341c454, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,814 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38531,-1 for getting cluster id 2024-11-21T00:26:39,814 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:39,815 DEBUG [HMaster-EventLoopGroup-27-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e753136-7b83-4eba-87dd-378bf22a8590' 2024-11-21T00:26:39,816 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:39,816 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e753136-7b83-4eba-87dd-378bf22a8590" 2024-11-21T00:26:39,816 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6886cab0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,816 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38531,-1] 2024-11-21T00:26:39,816 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:39,817 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:39,818 INFO [HMaster-EventLoopGroup-27-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59416, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:39,818 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cc8747a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,819 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:39,820 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:39,820 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@687e4af7 2024-11-21T00:26:39,820 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:39,822 INFO [HMaster-EventLoopGroup-27-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59430, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:26:39,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:35843,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:26:39,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:26:39,826 DEBUG [PEWorker-2 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:35843' 2024-11-21T00:26:39,827 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27a0614c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,827 DEBUG [PEWorker-2 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35843,-1 for getting cluster id 2024-11-21T00:26:39,827 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:39,828 DEBUG [HMaster-EventLoopGroup-29-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd093c870-e608-410e-9fad-5234a879f7a4' 
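
The span above shows HMaster(3973) on port 38531 accepting a request to create replication peer id=1 with config clusterKey=hbase+rpc://5ed4808ef0e6:35843 and storing AddPeerProcedure pid=7. A hedged sketch of issuing that request through the Admin API follows; the peer id and cluster key are copied from the log entry, and everything else (connection setup, the enabled flag) is an assumption rather than the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddReplicationPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cluster key copied from the log; replicateAllUserTables=true and
      // bandwidth=0 are the values the log reports for this peer.
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://5ed4808ef0e6:35843")
          .setReplicateAllUserTables(true)
          .build();
      // Submits an AddPeerProcedure on the master and waits for the
      // RefreshPeerProcedure fan-out to the region servers to finish.
      admin.addReplicationPeer("1", peerConfig, true /* enabled */);
    }
  }
}
```

Whether the hbase+rpc:// connection-URI form of the cluster key is accepted depends on the server version; the build logged here is 3.0.0-beta-2-SNAPSHOT, while older releases expect a ZooKeeper-quorum style key (host:port:znode).
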
2024-11-21T00:26:39,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:39,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d093c870-e608-410e-9fad-5234a879f7a4" 2024-11-21T00:26:39,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1862ed67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35843,-1] 2024-11-21T00:26:39,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:39,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:39,830 INFO [HMaster-EventLoopGroup-29-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:39,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ada3b2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:39,833 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:39,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:39,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6ca0b012 2024-11-21T00:26:39,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:39,835 INFO [HMaster-EventLoopGroup-29-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39342, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:26:39,836 INFO [PEWorker-2 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-2. 
2024-11-21T00:26:39,836 DEBUG [PEWorker-2 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172)
    at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118)
    at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188)
    at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45)
    at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188)
    at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181)
2024-11-21T00:26:39,836 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T00:26:39,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-21T00:26:39,837 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:39,838 INFO [PEWorker-2 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:26:39,839 DEBUG [PEWorker-2 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:26:39,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:39,841 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:26:39,842 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:39,843 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:26:39,896 DEBUG [PEWorker-2 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:26:39,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:26:39,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:40,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:40,312 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d26e6ab2bb8224b29ce4a3ac053d4a44, NAME => 'hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4 2024-11-21T00:26:40,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:26:40,323 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:40,323 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing d26e6ab2bb8224b29ce4a3ac053d4a44, disabling compactions & flushes 2024-11-21T00:26:40,323 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:40,323 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:40,323 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. after waiting 0 ms 2024-11-21T00:26:40,323 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:40,323 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 
2024-11-21T00:26:40,323 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for d26e6ab2bb8224b29ce4a3ac053d4a44: Waiting for close lock at 1732148800323Disabling compacts and flushes for region at 1732148800323Disabling writes for close at 1732148800323Writing region close event to WAL at 1732148800323Closed at 1732148800323 2024-11-21T00:26:40,325 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:26:40,325 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148800325"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148800325"}]},"ts":"1732148800325"} 2024-11-21T00:26:40,327 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:26:40,328 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:26:40,328 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148800328"}]},"ts":"1732148800328"} 2024-11-21T00:26:40,330 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:26:40,330 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d26e6ab2bb8224b29ce4a3ac053d4a44, ASSIGN}] 2024-11-21T00:26:40,332 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d26e6ab2bb8224b29ce4a3ac053d4a44, ASSIGN 2024-11-21T00:26:40,333 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=d26e6ab2bb8224b29ce4a3ac053d4a44, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,43823,1732148788181; forceNewPlan=false, retain=false 2024-11-21T00:26:40,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:40,484 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d26e6ab2bb8224b29ce4a3ac053d4a44, regionState=OPENING, regionLocation=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:40,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=d26e6ab2bb8224b29ce4a3ac053d4a44, ASSIGN because future has completed 2024-11-21T00:26:40,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure d26e6ab2bb8224b29ce4a3ac053d4a44, server=5ed4808ef0e6,43823,1732148788181}] 2024-11-21T00:26:40,655 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:40,655 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:40,656 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:26:40,658 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C43823%2C1732148788181.rep, suffix=, logDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181, archiveDir=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/oldWALs, maxLogs=10 2024-11-21T00:26:40,681 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.rep.1732148800658, exclude list is [], retry=0 2024-11-21T00:26:40,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44835,DS-6e5bc986-7d7f-441a-a6dc-2b7c4a3e8340,DISK] 2024-11-21T00:26:40,714 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.rep.1732148800658 2024-11-21T00:26:40,715 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46821:46821)] 2024-11-21T00:26:40,715 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => d26e6ab2bb8224b29ce4a3ac053d4a44, NAME => 'hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:40,716 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
2024-11-21T00:26:40,716 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:26:40,716 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. service=MultiRowMutationService 2024-11-21T00:26:40,716 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:26:40,716 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,716 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:40,716 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,716 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,728 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,733 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d26e6ab2bb8224b29ce4a3ac053d4a44 columnFamilyName hfileref 2024-11-21T00:26:40,733 DEBUG [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:40,734 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] regionserver.HStore(327): Store=d26e6ab2bb8224b29ce4a3ac053d4a44/hfileref, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:40,734 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,736 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d26e6ab2bb8224b29ce4a3ac053d4a44 columnFamilyName queue 2024-11-21T00:26:40,736 DEBUG [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:40,737 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] regionserver.HStore(327): Store=d26e6ab2bb8224b29ce4a3ac053d4a44/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:40,739 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,741 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d26e6ab2bb8224b29ce4a3ac053d4a44 columnFamilyName sid 2024-11-21T00:26:40,741 DEBUG [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:40,741 INFO [StoreOpener-d26e6ab2bb8224b29ce4a3ac053d4a44-1 {}] regionserver.HStore(327): Store=d26e6ab2bb8224b29ce4a3ac053d4a44/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:40,742 
DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,742 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,743 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,744 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,744 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,745 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:26:40,746 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,749 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:40,750 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened d26e6ab2bb8224b29ce4a3ac053d4a44; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61695812, jitterRate=-0.08066076040267944}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:26:40,750 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:40,750 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for d26e6ab2bb8224b29ce4a3ac053d4a44: Running coprocessor pre-open hook at 1732148800717Writing region info on filesystem at 1732148800717Initializing all the Stores at 1732148800717Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148800717Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148800728 (+11 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148800728Cleaning up temporary data from old regions at 1732148800744 (+16 ms)Running coprocessor post-open hooks at 1732148800750 (+6 ms)Region opened successfully at 1732148800750 2024-11-21T00:26:40,751 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44., pid=10, masterSystemTime=1732148800646 2024-11-21T00:26:40,754 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d26e6ab2bb8224b29ce4a3ac053d4a44, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:40,755 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:40,756 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 
2024-11-21T00:26:40,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure d26e6ab2bb8224b29ce4a3ac053d4a44, server=5ed4808ef0e6,43823,1732148788181 because future has completed 2024-11-21T00:26:40,760 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:26:40,761 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure d26e6ab2bb8224b29ce4a3ac053d4a44, server=5ed4808ef0e6,43823,1732148788181 in 265 msec 2024-11-21T00:26:40,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:26:40,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d26e6ab2bb8224b29ce4a3ac053d4a44, ASSIGN in 430 msec 2024-11-21T00:26:40,764 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:26:40,764 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148800764"}]},"ts":"1732148800764"} 2024-11-21T00:26:40,766 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:26:40,768 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:26:40,770 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 930 msec 2024-11-21T00:26:40,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44., hostname=5ed4808ef0e6,43823,1732148788181, seqNum=2] 2024-11-21T00:26:40,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:40,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:40,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:40,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:26:41,150 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43823 {}] regionserver.RSRpcServices(3929): Executing remote procedure class 
org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:26:41,160 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:26:41,212 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,43823,1732148788181, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:26:41,212 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:41,212 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43823,1732148788181, seqNum=-1] 2024-11-21T00:26:41,213 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:41,214 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-28-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42783, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.12 (auth:SIMPLE), service=ClientService 2024-11-21T00:26:41,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,43823,1732148788181', locateType=CURRENT is [region=hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44., hostname=5ed4808ef0e6,43823,1732148788181, seqNum=2] 2024-11-21T00:26:41,226 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:26:41,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:26:41,230 INFO [PEWorker-5 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,43823,1732148788181 suceeded 2024-11-21T00:26:41,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:26:41,232 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 233 msec 2024-11-21T00:26:41,232 INFO [PEWorker-3 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:35843,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:26:41,239 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.4100 sec 2024-11-21T00:26:41,257 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:35843' 2024-11-21T00:26:41,265 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@7d646ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:41,265 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35843,-1 for getting cluster id 2024-11-21T00:26:41,266 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:41,273 DEBUG [HMaster-EventLoopGroup-29-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd093c870-e608-410e-9fad-5234a879f7a4' 2024-11-21T00:26:41,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:41,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d093c870-e608-410e-9fad-5234a879f7a4" 2024-11-21T00:26:41,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@2f9e19ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:41,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35843,-1] 2024-11-21T00:26:41,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:41,274 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:41,275 INFO [HMaster-EventLoopGroup-29-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.12 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:41,276 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@24d722b5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:41,276 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:41,277 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:41,277 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3bb80d6c 2024-11-21T00:26:41,277 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:41,278 INFO [HMaster-EventLoopGroup-29-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39366, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.12 (auth:SIMPLE), service=MasterService 2024-11-21T00:26:41,279 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,43823,1732148788181 (queues=1) is replicating from cluster=5e753136-7b83-4eba-87dd-378bf22a8590 to cluster=d093c870-e608-410e-9fad-5234a879f7a4 2024-11-21T00:26:41,279 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C43823%2C1732148788181 2024-11-21T00:26:41,279 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,43823,1732148788181, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:26:41,292 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C43823%2C1732148788181 2024-11-21T00:26:41,304 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, startPosition=0, beingWritten=true 2024-11-21T00:26:41,317 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:41,317 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 379, reset compression=false 2024-11-21T00:26:41,317 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43823,1732148788181 got entry batch from reader: WALEntryBatch [walEntries=[], 
lastWalPath=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0]
2024-11-21T00:26:41,541 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 379, reset compression=false
2024-11-21T00:26:41,877 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 379, reset compression=false
2024-11-21T00:26:41,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38531 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-21T00:26:41,988 INFO [RPCClient-NioEventLoopGroup-4-1 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed
2024-11-21T00:26:41,988 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-21T00:26:41,988 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620)
    at org.apache.hadoop.hbase.replication.TestMasterReplication.setUpClusterTablesAndPeers(TestMasterReplication.java:232)
    at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication1(TestMasterReplication.java:152)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-21T00:26:41,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T00:26:41,989 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T00:26:41,989 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-21T00:26:41,996 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26c886f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-21T00:26:41,996 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35843,-1 for getting cluster id
2024-11-21T00:26:41,997 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-21T00:26:41,998 DEBUG [HMaster-EventLoopGroup-29-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd093c870-e608-410e-9fad-5234a879f7a4'
2024-11-21T00:26:41,998 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-21T00:26:41,998 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d093c870-e608-410e-9fad-5234a879f7a4"
2024-11-21T00:26:41,998 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@734a40d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-21T00:26:41,999 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35843,-1]
2024-11-21T00:26:41,999 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-11-21T00:26:41,999 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-21T00:26:42,000 INFO [HMaster-EventLoopGroup-29-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44988, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE),
service=ClientMetaService 2024-11-21T00:26:42,002 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15740fbf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:42,003 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:42,003 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:42,003 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37131024 2024-11-21T00:26:42,003 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:42,005 INFO [HMaster-EventLoopGroup-29-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44998, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:26:42,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:38531,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:26:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:26:42,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:42,008 DEBUG [PEWorker-2 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:38531' 2024-11-21T00:26:42,026 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6695c692, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:42,026 DEBUG [PEWorker-2 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38531,-1 for getting cluster id 2024-11-21T00:26:42,027 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:42,028 DEBUG [HMaster-EventLoopGroup-27-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e753136-7b83-4eba-87dd-378bf22a8590' 2024-11-21T00:26:42,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:42,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e753136-7b83-4eba-87dd-378bf22a8590" 2024-11-21T00:26:42,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305e704b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:42,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38531,-1] 2024-11-21T00:26:42,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:42,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:42,029 INFO [HMaster-EventLoopGroup-27-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50492, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:42,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@702acb00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:42,031 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:42,035 DEBUG [PEWorker-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:42,035 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7ac78d67 2024-11-21T00:26:42,035 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:42,036 INFO [HMaster-EventLoopGroup-27-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50506, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:26:42,038 INFO [PEWorker-2 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-2. 
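The ADD_REPLICATION_PEER operation logged above (HMaster "creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:38531,..." and the resulting AddPeerProcedure, pid=7) is driven by an ordinary client Admin call. A minimal sketch of that call, assuming the standard HBase Java client API; the peer id and cluster key are copied from the log, everything else (class name, connection setup) is illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class AddPeerSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Peer config mirroring the logged one: replicate all user tables, non-serial, no bandwidth cap.
          ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
              .setClusterKey("hbase+rpc://5ed4808ef0e6:38531") // cluster key as it appears in the log
              .setReplicateAllUserTables(true)
              .build();
          // Submits an AddPeerProcedure on the master and blocks until it completes.
          admin.addReplicationPeer("1", peerConfig, true /* enabled */);
        }
      }
    }

The async admin used by the test polls the master until that procedure finishes, which is where the repeated "Checking to see if procedure is done pid=7" lines in this section come from.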
2024-11-21T00:26:42,038 DEBUG [PEWorker-2 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:26:42,038 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:42,039 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:42,039 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
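The call stack above shows the short-lived connection being closed inside ReplicationPeerManager.checkClusterKey: before the peer is stored, the master connects to the peer cluster to validate the cluster key (the ClusterIdFetcher and "Stopping rpc client" lines) and closes the connection again. A rough equivalent of that probe using only the public client API; the configuration pointing at the peer cluster is assumed and not shown in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class PeerClusterIdCheck {
      public static void main(String[] args) throws Exception {
        // Assumed: conf is set up to reach the cluster named by the cluster key
        // (ZooKeeper quorum or RPC bootstrap servers, depending on the key form).
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The peer cluster's id, e.g. 5e753136-7b83-4eba-87dd-378bf22a8590 in this run.
          String peerClusterId = admin.getClusterMetrics().getClusterId();
          System.out.println("peer cluster id: " + peerClusterId);
        }
        // Closing the connection immediately produces a close call stack like the one logged above.
      }
    }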
2024-11-21T00:26:42,039 INFO [PEWorker-2 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:26:42,040 DEBUG [PEWorker-2 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:26:42,042 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:26:42,042 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:42,043 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:26:42,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:26:42,081 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 88d78059a3705c06013145894aab6de7, NAME => 'hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8 2024-11-21T00:26:42,091 DEBUG [PEWorker-2 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:26:42,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:42,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:26:42,286 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 379, reset compression=false 2024-11-21T00:26:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:42,394 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:26:42,394 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:26:42,556 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:42,556 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 88d78059a3705c06013145894aab6de7, disabling compactions & flushes 2024-11-21T00:26:42,556 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:42,556 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:42,556 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. after waiting 0 ms 2024-11-21T00:26:42,556 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:42,556 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 
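The hbase:replication table created by pid=8 above is declared with the MultiRowMutationEndpoint coprocessor, a DelimitedKeyPrefix split restriction, and three single-version column families (hfileref, queue, sid). The master creates this table itself, but the same descriptor shape can be expressed with the public builder API; a sketch for illustration only, with the attribute keys and family settings taken from the logged descriptor:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationTableDescriptorSketch {
      static TableDescriptor build() throws IOException {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "replication"))
                // Coprocessor and split restriction seen in the logged TABLE_ATTRIBUTES.
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .setValue("hbase.regionserver.region.split_restriction.type", "DelimitedKeyPrefix")
                .setValue("hbase.regionserver.region.split_restriction.delimiter", "-");
        for (String family : new String[] { "hfileref", "queue", "sid" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1' in the logged families
              .build());
        }
        return builder.build();
      }
    }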
2024-11-21T00:26:42,556 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 88d78059a3705c06013145894aab6de7: Waiting for close lock at 1732148802556Disabling compacts and flushes for region at 1732148802556Disabling writes for close at 1732148802556Writing region close event to WAL at 1732148802556Closed at 1732148802556 2024-11-21T00:26:42,558 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:26:42,558 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148802558"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148802558"}]},"ts":"1732148802558"} 2024-11-21T00:26:42,562 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:26:42,563 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:26:42,563 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148802563"}]},"ts":"1732148802563"} 2024-11-21T00:26:42,566 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:26:42,567 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=88d78059a3705c06013145894aab6de7, ASSIGN}] 2024-11-21T00:26:42,569 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=88d78059a3705c06013145894aab6de7, ASSIGN 2024-11-21T00:26:42,569 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=88d78059a3705c06013145894aab6de7, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,38425,1732148794847; forceNewPlan=false, retain=false 2024-11-21T00:26:42,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:42,720 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=88d78059a3705c06013145894aab6de7, regionState=OPENING, regionLocation=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:42,723 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=88d78059a3705c06013145894aab6de7, ASSIGN because future has completed 2024-11-21T00:26:42,724 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 88d78059a3705c06013145894aab6de7, server=5ed4808ef0e6,38425,1732148794847}] 2024-11-21T00:26:42,813 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 379, reset compression=false 2024-11-21T00:26:42,913 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:42,913 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:26:42,913 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:26:42,915 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C38425%2C1732148794847.rep, suffix=, logDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847, archiveDir=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/oldWALs, maxLogs=10 2024-11-21T00:26:42,939 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.rep.1732148802916, exclude list is [], retry=0 2024-11-21T00:26:42,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44701,DS-1d5598d7-0b5a-4c22-b7c8-be9810911139,DISK] 2024-11-21T00:26:42,947 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.rep.1732148802916 2024-11-21T00:26:42,949 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42263:42263)] 2024-11-21T00:26:42,950 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 88d78059a3705c06013145894aab6de7, NAME => 'hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:26:42,950 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System 
coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:26:42,950 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:26:42,950 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. service=MultiRowMutationService 2024-11-21T00:26:42,950 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:26:42,950 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,951 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:26:42,951 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,951 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,972 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,974 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 88d78059a3705c06013145894aab6de7 columnFamilyName hfileref 2024-11-21T00:26:42,974 DEBUG [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:42,976 INFO 
[StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] regionserver.HStore(327): Store=88d78059a3705c06013145894aab6de7/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:42,976 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,978 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 88d78059a3705c06013145894aab6de7 columnFamilyName queue 2024-11-21T00:26:42,978 DEBUG [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:42,978 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] regionserver.HStore(327): Store=88d78059a3705c06013145894aab6de7/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:42,978 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,979 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 88d78059a3705c06013145894aab6de7 columnFamilyName sid 2024-11-21T00:26:42,979 DEBUG [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:26:42,980 INFO [StoreOpener-88d78059a3705c06013145894aab6de7-1 {}] regionserver.HStore(327): Store=88d78059a3705c06013145894aab6de7/sid, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:26:42,980 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,981 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,981 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,991 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,991 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:42,992 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
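The "42.7 M" above is the fallback FlushLargeStoresPolicy uses when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table: the region's memstore flush size divided by the number of column families. With the default 128 MiB hbase.hregion.memstore.flush.size and the three families of hbase:replication this is 134217728 / 3 ≈ 44739242 bytes, matching the flushSizeLowerBound=44739242 printed when the region finishes opening below. A tiny sketch of that arithmetic:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size default, in bytes
        int columnFamilies = 3;                      // hfileref, queue, sid
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);              // 44739242 (~42.7 MB), as logged
      }
    }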
2024-11-21T00:26:42,993 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:43,010 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:26:43,011 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 88d78059a3705c06013145894aab6de7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59503241, jitterRate=-0.11333261430263519}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:26:43,011 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:43,011 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 88d78059a3705c06013145894aab6de7: Running coprocessor pre-open hook at 1732148802951Writing region info on filesystem at 1732148802951Initializing all the Stores at 1732148802953 (+2 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148802953Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148802972 (+19 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148802972Cleaning up temporary data from old regions at 1732148802991 (+19 ms)Running coprocessor post-open hooks at 1732148803011 (+20 ms)Region opened successfully at 1732148803011 2024-11-21T00:26:43,012 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7., pid=10, masterSystemTime=1732148802885 2024-11-21T00:26:43,015 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=88d78059a3705c06013145894aab6de7, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:43,015 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:43,016 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:43,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 88d78059a3705c06013145894aab6de7, server=5ed4808ef0e6,38425,1732148794847 because future has completed 2024-11-21T00:26:43,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:26:43,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 88d78059a3705c06013145894aab6de7, server=5ed4808ef0e6,38425,1732148794847 in 309 msec 2024-11-21T00:26:43,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:26:43,038 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=88d78059a3705c06013145894aab6de7, ASSIGN in 469 msec 2024-11-21T00:26:43,039 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:26:43,039 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148803039"}]},"ts":"1732148803039"} 2024-11-21T00:26:43,042 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:26:43,043 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:26:43,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 1.0040 sec 2024-11-21T00:26:43,097 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7., hostname=5ed4808ef0e6,38425,1732148794847, seqNum=2] 2024-11-21T00:26:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:43,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:43,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/1-991210048 2024-11-21T00:26:43,200 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:26:43,237 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:26:43,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:43,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:43,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:43,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:43,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:43,337 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:43,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38425 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:26:43,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:43,363 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:26:43,443 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,38425,1732148794847, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:26:43,444 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:43,444 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38425,1732148794847, seqNum=-1] 2024-11-21T00:26:43,445 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:43,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36425, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.13 (auth:SIMPLE), service=ClientService 2024-11-21T00:26:43,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,38425,1732148794847', locateType=CURRENT is [region=hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7., hostname=5ed4808ef0e6,38425,1732148794847, seqNum=2] 2024-11-21T00:26:43,453 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 379, reset compression=false 2024-11-21T00:26:43,460 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:26:43,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:26:43,463 INFO [PEWorker-1 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,38425,1732148794847 suceeded 2024-11-21T00:26:43,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:26:43,465 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 263 msec 2024-11-21T00:26:43,465 INFO [PEWorker-1 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:38531,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:26:43,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; 
org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.4600 sec 2024-11-21T00:26:43,484 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:38531' 2024-11-21T00:26:43,488 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1bc41d15, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:43,488 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38531,-1 for getting cluster id 2024-11-21T00:26:43,488 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:43,489 DEBUG [HMaster-EventLoopGroup-27-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e753136-7b83-4eba-87dd-378bf22a8590' 2024-11-21T00:26:43,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:43,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e753136-7b83-4eba-87dd-378bf22a8590" 2024-11-21T00:26:43,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@7d00c3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:43,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38531,-1] 2024-11-21T00:26:43,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:43,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:43,491 INFO [HMaster-EventLoopGroup-27-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50524, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.13 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:43,492 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@534baae5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:43,492 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:26:43,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 
{}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:43,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@13139d4f 2024-11-21T00:26:43,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:26:43,494 INFO [HMaster-EventLoopGroup-27-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.13 (auth:SIMPLE), service=MasterService 2024-11-21T00:26:43,496 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,38425,1732148794847 (queues=1) is replicating from cluster=d093c870-e608-410e-9fad-5234a879f7a4 to cluster=5e753136-7b83-4eba-87dd-378bf22a8590 2024-11-21T00:26:43,496 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C38425%2C1732148794847 2024-11-21T00:26:43,496 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,38425,1732148794847, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:26:43,497 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, startPosition=0, beingWritten=true 2024-11-21T00:26:43,497 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C38425%2C1732148794847 2024-11-21T00:26:43,529 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:43,529 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 379, reset compression=false 2024-11-21T00:26:43,534 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38425,1732148794847 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:26:43,782 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 379, reset compression=false 2024-11-21T00:26:44,087 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 379, reset compression=false 2024-11-21T00:26:44,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35843 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:26:44,158 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:26:44,158 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
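The replication source above runs two threads per WAL group: a wal-reader that tails the current WAL (the repeated "Reset reader ... to pos 379" lines) and a shipper that logs "got entry batch from reader" when a batch arrives. The reader init line shows the hand-off is a bounded queue (replicationBatchQueueCapacity=1). A generic producer/consumer illustration of that hand-off, not HBase's actual classes; Batch is a stand-in for WALEntryBatch:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    public class ReaderShipperSketch {
      record Batch(long lastWalPosition, int nbRowKeys) {}

      public static void main(String[] args) {
        // Capacity 1 mirrors replicationBatchQueueCapacity=1: the reader blocks until
        // the shipper has drained the previous batch.
        BlockingQueue<Batch> queue = new ArrayBlockingQueue<>(1);

        Thread reader = new Thread(() -> {
          try {
            queue.put(new Batch(379, 0)); // "Read 0 WAL entries", lastWalPosition=379
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }, "wal-reader");

        Thread shipper = new Thread(() -> {
          try {
            Batch batch = queue.take();   // "Shipper ... got entry batch from reader"
            System.out.println("shipping batch up to pos " + batch.lastWalPosition());
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }, "shipper");

        reader.start();
        shipper.start();
      }
    }

A capacity of one means the reader cannot run more than one batch ahead of the shipper, which keeps the buffered-edit memory (the usedBufferSize field in the batch dumps) bounded.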
2024-11-21T00:26:44,158 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.setUpClusterTablesAndPeers(TestMasterReplication.java:233) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication1(TestMasterReplication.java:152) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:44,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:44,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:44,158 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
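With the peers in place the test writes a row on one cluster and polls the other until it appears, which is where the "Waiting for more time for replication. Row:row" message just below comes from. A simplified sketch of such a wait loop against the sink cluster's 'test' table; the table name and row key are taken from the log, the retry count and sleep interval are arbitrary:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WaitForReplicationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("test"))) {
          byte[] row = Bytes.toBytes("row");
          for (int attempt = 0; attempt < 30; attempt++) {
            Result result = table.get(new Get(row));
            if (!result.isEmpty()) {
              System.out.println("row replicated after " + attempt + " retries");
              return;
            }
            System.out.println("Waiting for more time for replication. Row:row");
            Thread.sleep(1000);
          }
          throw new AssertionError("row was not replicated in time");
        }
      }
    }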
2024-11-21T00:26:44,159 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:44,160 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43823,1732148788181, seqNum=-1] 2024-11-21T00:26:44,164 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:44,165 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-28-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42500, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:26:44,167 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce., hostname=5ed4808ef0e6,43823,1732148788181, seqNum=2] 2024-11-21T00:26:44,173 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:44,174 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38425,1732148794847, seqNum=-1] 2024-11-21T00:26:44,175 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:44,176 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37056, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:26:44,182 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303., hostname=5ed4808ef0e6,38425,1732148794847, seqNum=2] 2024-11-21T00:26:44,187 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 379, reset compression=false 2024-11-21T00:26:44,192 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row. 
IsDeleteReplication:false 2024-11-21T00:26:44,214 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:26:44,214 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 480, reset compression=false 2024-11-21T00:26:44,214 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43823,1732148788181 got entry batch from reader: WALEntryBatch [walEntries=[{test/a6e31f0ed205e4fb314ad9036d0360ce/4=[#edits: 1 = ],199}], lastWalPath=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, lastWalPosition=480, nbRowKeys=1, nbHFiles=0, heapSize=199, lastSeqIds={}, endOfFile=false,usedBufferSize=199] 2024-11-21T00:26:44,220 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:26:44,221 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:26:44,224 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37070, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.12 (auth:SIMPLE), service=AdminService 2024-11-21T00:26:44,226 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] regionserver.ReplicationSink(298): Started replicating mutations. 
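[Editor's sketch] The wal-reader/shipper entries above show one WAL edit being read from the source WAL and handed to the peer's ReplicationSink ("Started replicating mutations."). The client-visible origin of such an edit is simply a Put against the 'test' table on the source cluster; a minimal sketch, with the column family "f" and qualifier/value names assumed rather than taken from the test, could be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutOnSourceSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical configuration pointing at the source cluster.
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("test"))) {
                Put put = new Put(Bytes.toBytes("row"));
                put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c1"), Bytes.toBytes("v1"));
                table.put(put);
                // The edit lands in the source WAL; the replication source reads it and ships it
                // to the peer, which is what the wal-reader/shipper entries above record.
            }
        }
    }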
2024-11-21T00:26:44,231 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@646464a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:44,231 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35843,-1 for getting cluster id 2024-11-21T00:26:44,231 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:44,233 DEBUG [HMaster-EventLoopGroup-29-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd093c870-e608-410e-9fad-5234a879f7a4' 2024-11-21T00:26:44,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:44,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d093c870-e608-410e-9fad-5234a879f7a4" 2024-11-21T00:26:44,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@344111f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:44,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35843,-1] 2024-11-21T00:26:44,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:44,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:44,236 INFO [HMaster-EventLoopGroup-29-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45020, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.13 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:44,239 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@59aac51d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:44,247 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:44,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38425,1732148794847, seqNum=-1] 2024-11-21T00:26:44,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:44,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37074, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.13 (auth:SIMPLE), service=ClientService 2024-11-21T00:26:44,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303., hostname=5ed4808ef0e6,38425,1732148794847, seqNum=2] 2024-11-21T00:26:44,350 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:26:44,441 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 480, reset compression=false 2024-11-21T00:26:44,530 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 379, reset compression=false 2024-11-21T00:26:44,549 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:44,549 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 503, reset compression=false 2024-11-21T00:26:44,549 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38425,1732148794847 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, lastWalPosition=503, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:26:44,756 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 503, reset compression=false 
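[Editor's sketch] The "Waiting for more time for replication. Row:row ..." / "Obtained row:row" messages around this point come from the test repeatedly reading the row on the peer cluster until replication catches up. A minimal poll-until-visible helper in that spirit (all names, retry count, and sleep interval here are illustrative, not the test's own constants) might be:

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class WaitForReplicationSketch {
        // Polls the peer-cluster table until the row shows up or the retry budget is spent.
        static Result waitForRow(Table peerTable, String row, int retries, long sleepMs)
                throws Exception {
            for (int i = 0; i < retries; i++) {
                Result r = peerTable.get(new Get(Bytes.toBytes(row)));
                if (!r.isEmpty()) {
                    return r;           // replication caught up; "Obtained row:..."
                }
                Thread.sleep(sleepMs);  // "Waiting for more time for replication."
            }
            throw new AssertionError("Row " + row + " never replicated");
        }
    }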
2024-11-21T00:26:44,774 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 480, reset compression=false 2024-11-21T00:26:45,085 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 503, reset compression=false 2024-11-21T00:26:45,202 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row. IsDeleteReplication:false 2024-11-21T00:26:45,220 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row1. IsDeleteReplication:false 2024-11-21T00:26:45,224 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 480, reset compression=false 2024-11-21T00:26:45,525 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 503, reset compression=false 2024-11-21T00:26:45,536 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:26:45,536 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 607, reset compression=false 2024-11-21T00:26:45,536 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38425,1732148794847 got entry batch from reader: WALEntryBatch [walEntries=[{test/d7dc5d9799b9dbfd0669e5e4e687e303/5=[#edits: 1 = ],207}], 
lastWalPath=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, lastWalPosition=607, nbRowKeys=1, nbHFiles=0, heapSize=207, lastSeqIds={}, endOfFile=false,usedBufferSize=207] 2024-11-21T00:26:45,542 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:26:45,544 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-28-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42512, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.13 (auth:SIMPLE), service=AdminService 2024-11-21T00:26:45,545 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:26:45,557 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@71900990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:45,557 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38531,-1 for getting cluster id 2024-11-21T00:26:45,557 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:26:45,558 DEBUG [HMaster-EventLoopGroup-27-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5e753136-7b83-4eba-87dd-378bf22a8590' 2024-11-21T00:26:45,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:26:45,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5e753136-7b83-4eba-87dd-378bf22a8590" 2024-11-21T00:26:45,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@4ee43068, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:45,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38531,-1] 2024-11-21T00:26:45,562 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:26:45,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:45,563 INFO [HMaster-EventLoopGroup-27-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50530, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.12 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:26:45,564 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@6092afd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:26:45,566 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:26:45,573 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43823,1732148788181, seqNum=-1] 2024-11-21T00:26:45,581 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:26:45,586 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-28-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.12 (auth:SIMPLE), service=ClientService 2024-11-21T00:26:45,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-30-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row1', locateType=CURRENT is [region=test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce., hostname=5ed4808ef0e6,43823,1732148788181, seqNum=2] 2024-11-21T00:26:45,593 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:26:45,745 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 480, reset compression=false 2024-11-21T00:26:45,760 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:45,760 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 608, reset compression=false 2024-11-21T00:26:45,761 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43823,1732148788181 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, lastWalPosition=608, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, 
endOfFile=false,usedBufferSize=0] 2024-11-21T00:26:45,772 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 607, reset compression=false 2024-11-21T00:26:46,013 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 608, reset compression=false 2024-11-21T00:26:46,133 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 607, reset compression=false 2024-11-21T00:26:46,222 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row1. IsDeleteReplication:false 2024-11-21T00:26:46,231 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row. 
IsDeleteReplication:true 2024-11-21T00:26:46,335 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 608, reset compression=false 2024-11-21T00:26:46,344 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:26:46,344 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 764, reset compression=false 2024-11-21T00:26:46,347 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43823,1732148788181 got entry batch from reader: WALEntryBatch [walEntries=[{test/a6e31f0ed205e4fb314ad9036d0360ce/6=[#edits: 2 = ],271}], lastWalPath=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, lastWalPosition=764, nbRowKeys=1, nbHFiles=0, heapSize=271, lastSeqIds={}, endOfFile=false,usedBufferSize=271] 2024-11-21T00:26:46,349 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:26:46,353 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=38425 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
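[Editor's sketch] From here on the test is checking delete replication (IsDeleteReplication:true): the two-edit WAL batch shipped above carries delete markers, and the later flush shows DeleteFamily cells being written. The source-side operation behind that is a whole-row Delete; a minimal sketch, again with placeholder configuration, could be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteReplicationSketch {
        public static void main(String[] args) throws Exception {
            // Assumed to point at the source cluster; the row key matches the one in the log.
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("test"))) {
                // A whole-row delete becomes DeleteFamily markers in the WAL entry that the
                // replication source ships; the peer then sees the row disappear as well.
                table.delete(new Delete(Bytes.toBytes("row")));
            }
        }
    }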
2024-11-21T00:26:46,537 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 607, reset compression=false 2024-11-21T00:26:46,568 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:46,568 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 754, reset compression=false 2024-11-21T00:26:46,568 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38425,1732148794847 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, lastWalPosition=754, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:26:46,569 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 764, reset compression=false 2024-11-21T00:26:46,789 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 754, reset compression=false 2024-11-21T00:26:46,901 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 764, reset compression=false 2024-11-21T00:26:47,100 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 754, reset compression=false 2024-11-21T00:26:47,199 WARN [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 10, running: 0 2024-11-21T00:26:47,234 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row. IsDeleteReplication:true 2024-11-21T00:26:47,238 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row1. IsDeleteReplication:true 2024-11-21T00:26:47,308 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 764, reset compression=false 2024-11-21T00:26:47,373 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:26:47,517 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 754, reset compression=false 2024-11-21T00:26:47,522 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:26:47,523 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 913, reset compression=false 2024-11-21T00:26:47,523 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38425,1732148794847 got entry 
batch from reader: WALEntryBatch [walEntries=[{test/d7dc5d9799b9dbfd0669e5e4e687e303/7=[#edits: 2 = ],279}], lastWalPath=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, lastWalPosition=913, nbRowKeys=1, nbHFiles=0, heapSize=279, lastSeqIds={}, endOfFile=false,usedBufferSize=279] 2024-11-21T00:26:47,524 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:26:47,527 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=43823 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:26:47,733 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 913, reset compression=false 2024-11-21T00:26:47,811 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 764, reset compression=false 2024-11-21T00:26:47,816 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:47,816 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:47,816 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43823,1732148788181 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, lastWalPosition=914, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:26:48,020 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:48,036 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 913, reset compression=false 2024-11-21T00:26:48,241 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row1. IsDeleteReplication:true 2024-11-21T00:26:48,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:26:48,244 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:26:48,244 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication1(TestMasterReplication.java:167) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:48,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:48,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:48,244 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:26:48,244 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:26:48,244 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=504264324, stopped=false 2024-11-21T00:26:48,244 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,35843,1732148794477 2024-11-21T00:26:48,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-991210048/running 2024-11-21T00:26:48,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-991210048/running 2024-11-21T00:26:48,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:48,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:48,268 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:26:48,268 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
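[Editor's sketch] The shutdown sequence above (HBaseTestingUtil.shutdownMiniCluster in the call stack, the master's "Cluster shutdown requested", and the ZooKeeper NodeDeleted events for the /running znode) is the standard mini-cluster teardown. A minimal JUnit 4 lifecycle sketch in that spirit, assuming the HBaseTestingUtil API mirrors the classic start/shutdown pair (not a copy of this test's setup), might be:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleSketch {
        private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

        @BeforeClass
        public static void setUp() throws Exception {
            UTIL.startMiniCluster();     // brings up ZK, HDFS, a master and a region server
        }

        @AfterClass
        public static void tearDown() throws Exception {
            UTIL.shutdownMiniCluster();  // drives the "Shutting down minicluster" sequence seen above
        }
    }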
2024-11-21T00:26:48,268 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication1(TestMasterReplication.java:167) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:48,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:48,269 INFO [Time-limited test {}] regionserver.HRegionServer(2196): 
***** STOPPING region server '5ed4808ef0e6,38425,1732148794847' ***** 2024-11-21T00:26:48,269 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:26:48,269 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:26:48,269 INFO [RS:0;5ed4808ef0e6:38425 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:26:48,269 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:26:48,269 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on znode that does not yet exist, /1-991210048/running 2024-11-21T00:26:48,269 INFO [RS:0;5ed4808ef0e6:38425 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:26:48,270 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Set watcher on znode that does not yet exist, /1-991210048/running 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(3091): Received CLOSE for d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(3091): Received CLOSE for 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:38425. 
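[Editor's sketch] The repeated "Connection has been closed by ..." entries, including the one just above from the region server closing its internal async cluster connection, all come from AsyncConnectionImpl.close(). From an application's point of view the same async client API is opened and closed like this (illustrative only; table and row names are assumptions):

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AsyncConnectionSketch {
        public static void main(String[] args) throws Exception {
            CompletableFuture<AsyncConnection> future =
                ConnectionFactory.createAsyncConnection(HBaseConfiguration.create());
            try (AsyncConnection conn = future.get()) {
                Result r = conn.getTable(TableName.valueOf("test"))
                    .get(new Get(Bytes.toBytes("row")))
                    .get();
                System.out.println("row present: " + !r.isEmpty());
            } // close() logs "Connection has been closed by ..." plus a call stack, as above
        }
    }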
2024-11-21T00:26:48,270 DEBUG [RS:0;5ed4808ef0e6:38425 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:48,270 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d7dc5d9799b9dbfd0669e5e4e687e303, disabling compactions & flushes 2024-11-21T00:26:48,270 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:26:48,270 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:26:48,270 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:48,270 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:26:48,270 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. after waiting 0 ms 2024-11-21T00:26:48,270 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 
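[Editor's sketch] The entries that follow show the closing region flushing its memstore to HFiles before shutting down ("Flushing ... column families", then the .tmp files being committed). The same work can be requested explicitly through the Admin API; a minimal sketch, with a placeholder configuration and the 'test' table name taken from the log, might be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ExplicitFlushSketch {
        public static void main(String[] args) throws Exception {
            // Illustrative only: an explicit flush writes the memstore out to HFiles,
            // the same work the region otherwise performs while closing during shutdown.
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("test"));
            }
        }
    }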
2024-11-21T00:26:48,271 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d7dc5d9799b9dbfd0669e5e4e687e303 3/3 column families, dataSize=192 B heapSize=1.46 KB 2024-11-21T00:26:48,271 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:26:48,271 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1325): Online Regions={d7dc5d9799b9dbfd0669e5e4e687e303=test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303., 1588230740=hbase:meta,,1.1588230740, 88d78059a3705c06013145894aab6de7=hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7.} 2024-11-21T00:26:48,271 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 88d78059a3705c06013145894aab6de7, d7dc5d9799b9dbfd0669e5e4e687e303 2024-11-21T00:26:48,271 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:26:48,271 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:26:48,271 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:26:48,271 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:26:48,271 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:26:48,271 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:26:48,288 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/info/aa0fea9b0f2e4d05bbb3d1d2085d7181 is 147, key is hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7./info:regioninfo/1732148803015/Put/seqid=0 2024-11-21T00:26:48,289 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/f/8786e1ff65c24e0db1db8b292a61cac8 is 29, key is row1/f:/1732148807235/DeleteFamily/seqid=0 2024-11-21T00:26:48,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741841_1017 (size=5118) 2024-11-21T00:26:48,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741840_1016 (size=7686) 2024-11-21T00:26:48,294 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=112 B at sequenceid=8 (bloomFilter=true), 
to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/f/8786e1ff65c24e0db1db8b292a61cac8 2024-11-21T00:26:48,298 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8786e1ff65c24e0db1db8b292a61cac8 2024-11-21T00:26:48,300 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:26:48,313 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/f1/1101bf893cd9472e841b3f115ea1661b is 30, key is row1/f1:/1732148807235/DeleteFamily/seqid=0 2024-11-21T00:26:48,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741842_1018 (size=5123) 2024-11-21T00:26:48,321 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/f1/1101bf893cd9472e841b3f115ea1661b 2024-11-21T00:26:48,324 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:48,326 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1101bf893cd9472e841b3f115ea1661b 2024-11-21T00:26:48,352 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/norep/8f3175dbed044270b890f442b61eecaa is 33, key is row1/norep:/1732148807235/DeleteFamily/seqid=0 2024-11-21T00:26:48,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741843_1019 (size=5108) 2024-11-21T00:26:48,362 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/norep/8f3175dbed044270b890f442b61eecaa 2024-11-21T00:26:48,369 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8f3175dbed044270b890f442b61eecaa 2024-11-21T00:26:48,370 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/f/8786e1ff65c24e0db1db8b292a61cac8 as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/f/8786e1ff65c24e0db1db8b292a61cac8 2024-11-21T00:26:48,375 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8786e1ff65c24e0db1db8b292a61cac8 2024-11-21T00:26:48,375 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/f/8786e1ff65c24e0db1db8b292a61cac8, entries=2, sequenceid=8, filesize=5.0 K 2024-11-21T00:26:48,375 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/f1/1101bf893cd9472e841b3f115ea1661b as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/f1/1101bf893cd9472e841b3f115ea1661b 2024-11-21T00:26:48,380 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1101bf893cd9472e841b3f115ea1661b 2024-11-21T00:26:48,380 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/f1/1101bf893cd9472e841b3f115ea1661b, entries=2, sequenceid=8, filesize=5.0 K 2024-11-21T00:26:48,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/.tmp/norep/8f3175dbed044270b890f442b61eecaa as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/norep/8f3175dbed044270b890f442b61eecaa 2024-11-21T00:26:48,386 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8f3175dbed044270b890f442b61eecaa 2024-11-21T00:26:48,386 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/norep/8f3175dbed044270b890f442b61eecaa, entries=1, sequenceid=8, filesize=5.0 K 2024-11-21T00:26:48,387 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~192 B/192, heapSize ~1.41 KB/1448, currentSize=0 B/0 for d7dc5d9799b9dbfd0669e5e4e687e303 in 116ms, sequenceid=8, compaction requested=false 
2024-11-21T00:26:48,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/default/test/d7dc5d9799b9dbfd0669e5e4e687e303/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-21T00:26:48,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:26:48,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:26:48,395 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 2024-11-21T00:26:48,395 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d7dc5d9799b9dbfd0669e5e4e687e303: Waiting for close lock at 1732148808270Running coprocessor pre-close hooks at 1732148808270Disabling compacts and flushes for region at 1732148808270Disabling writes for close at 1732148808270Obtaining lock to block concurrent updates at 1732148808271 (+1 ms)Preparing flush snapshotting stores in d7dc5d9799b9dbfd0669e5e4e687e303 at 1732148808271Finished memstore snapshotting test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303., syncing WAL and waiting on mvcc, flushsize=dataSize=192, getHeapSize=1448, getOffHeapSize=0, getCellsCount=7 at 1732148808271Flushing stores of test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. at 1732148808271Flushing d7dc5d9799b9dbfd0669e5e4e687e303/f: creating writer at 1732148808272 (+1 ms)Flushing d7dc5d9799b9dbfd0669e5e4e687e303/f: appending metadata at 1732148808288 (+16 ms)Flushing d7dc5d9799b9dbfd0669e5e4e687e303/f: closing flushed file at 1732148808288Flushing d7dc5d9799b9dbfd0669e5e4e687e303/f1: creating writer at 1732148808299 (+11 ms)Flushing d7dc5d9799b9dbfd0669e5e4e687e303/f1: appending metadata at 1732148808312 (+13 ms)Flushing d7dc5d9799b9dbfd0669e5e4e687e303/f1: closing flushed file at 1732148808312Flushing d7dc5d9799b9dbfd0669e5e4e687e303/norep: creating writer at 1732148808327 (+15 ms)Flushing d7dc5d9799b9dbfd0669e5e4e687e303/norep: appending metadata at 1732148808351 (+24 ms)Flushing d7dc5d9799b9dbfd0669e5e4e687e303/norep: closing flushed file at 1732148808351Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73566c1d: reopening flushed file at 1732148808370 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@632d34de: reopening flushed file at 1732148808375 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@66b16a56: reopening flushed file at 1732148808381 (+6 ms)Finished flush of dataSize ~192 B/192, heapSize ~1.41 KB/1448, currentSize=0 B/0 for d7dc5d9799b9dbfd0669e5e4e687e303 in 116ms, sequenceid=8, compaction requested=false at 1732148808387 (+6 ms)Writing region close event to WAL at 1732148808388 (+1 ms)Running coprocessor post-close hooks at 1732148808395 (+7 ms)Closed at 1732148808395 2024-11-21T00:26:48,396 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303. 
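The region close journal above is a list of timestamped milestones rendered with millisecond deltas, and the recovered.edits/11.seqid marker records the region's newMaxSeqId so a later open knows which WAL edits are already persisted. A small sketch of how such a journal could be accumulated and rendered in the same "(+N ms)" style; this mirrors only the log format, not the HRegion implementation:

import java.util.ArrayList;
import java.util.List;

class CloseJournalSketch {
  private final List<String> messages = new ArrayList<>();
  private final List<Long> timestamps = new ArrayList<>();

  void record(String message) {
    messages.add(message);
    timestamps.add(System.currentTimeMillis());
  }

  String render() {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < messages.size(); i++) {
      sb.append(messages.get(i)).append(" at ").append(timestamps.get(i));
      long delta = i == 0 ? 0 : timestamps.get(i) - timestamps.get(i - 1);
      if (delta > 0) {
        sb.append(" (+").append(delta).append(" ms)");
      }
    }
    return sb.toString();
  }
}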
2024-11-21T00:26:48,396 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 88d78059a3705c06013145894aab6de7, disabling compactions & flushes 2024-11-21T00:26:48,396 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:48,396 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:48,396 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. after waiting 0 ms 2024-11-21T00:26:48,396 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:48,396 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 88d78059a3705c06013145894aab6de7 3/3 column families, dataSize=892 B heapSize=2.06 KB 2024-11-21T00:26:48,418 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7/.tmp/queue/ba20f982144046fe9524c10b2ce92062 is 153, key is 1-5ed4808ef0e6,38425,1732148794847/queue:5ed4808ef0e6%2C38425%2C1732148794847/1732148807528/Put/seqid=0 2024-11-21T00:26:48,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741844_1020 (size=5352) 2024-11-21T00:26:48,440 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 913, reset compression=false 2024-11-21T00:26:48,454 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:26:48,456 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:48,456 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 1804, reset compression=false 2024-11-21T00:26:48,456 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38425,1732148794847 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994, lastWalPosition=1804, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:26:48,458 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,38425,1732148794847: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,38425,1732148794847, walGroup=5ed4808ef0e6%2C38425%2C1732148794847, offset=5ed4808ef0e6%2C38425%2C1732148794847.1732148796994:1804, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:38425 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 
7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:26:48,460 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:26:48,460 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:26:48,461 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:26:48,461 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:26:48,461 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:26:48,461 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1109393408, 
"init": 1048576000, "max": 2306867200, "used": 708491280 }, "NonHeapMemoryUsage": { "committed": 197001216, "init": 7667712, "max": -1, "used": 194116104 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:26:48,462 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:35843 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:35843 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-21T00:26:48,471 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:48,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:48,506 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:48,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:48,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:48,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:48,507 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:48,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:48,671 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 88d78059a3705c06013145894aab6de7 2024-11-21T00:26:48,681 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 1804, reset compression=false 2024-11-21T00:26:48,694 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/info/aa0fea9b0f2e4d05bbb3d1d2085d7181 2024-11-21T00:26:48,717 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/ns/e160929e17b946d09ec289e33d3a221e is 43, key is default/ns:d/1732148797870/Put/seqid=0 2024-11-21T00:26:48,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741845_1021 (size=5153) 2024-11-21T00:26:48,740 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:48,827 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=892 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7/.tmp/queue/ba20f982144046fe9524c10b2ce92062 2024-11-21T00:26:48,829 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:26:48,830 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:26:48,832 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7/.tmp/queue/ba20f982144046fe9524c10b2ce92062 as 
hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7/queue/ba20f982144046fe9524c10b2ce92062 2024-11-21T00:26:48,837 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7/queue/ba20f982144046fe9524c10b2ce92062, entries=1, sequenceid=10, filesize=5.2 K 2024-11-21T00:26:48,837 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~892 B/892, heapSize ~1.55 KB/1584, currentSize=0 B/0 for 88d78059a3705c06013145894aab6de7 in 441ms, sequenceid=10, compaction requested=false 2024-11-21T00:26:48,845 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/replication/88d78059a3705c06013145894aab6de7/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=1 2024-11-21T00:26:48,845 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:26:48,845 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:26:48,846 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:48,846 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 88d78059a3705c06013145894aab6de7: Waiting for close lock at 1732148808396Running coprocessor pre-close hooks at 1732148808396Disabling compacts and flushes for region at 1732148808396Disabling writes for close at 1732148808396Obtaining lock to block concurrent updates at 1732148808396Preparing flush snapshotting stores in 88d78059a3705c06013145894aab6de7 at 1732148808396Finished memstore snapshotting hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7., syncing WAL and waiting on mvcc, flushsize=dataSize=892, getHeapSize=2064, getOffHeapSize=0, getCellsCount=6 at 1732148808396Flushing stores of hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 
at 1732148808400 (+4 ms)Flushing 88d78059a3705c06013145894aab6de7/queue: creating writer at 1732148808400Flushing 88d78059a3705c06013145894aab6de7/queue: appending metadata at 1732148808418 (+18 ms)Flushing 88d78059a3705c06013145894aab6de7/queue: closing flushed file at 1732148808418Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e93fc5b: reopening flushed file at 1732148808831 (+413 ms)Finished flush of dataSize ~892 B/892, heapSize ~1.55 KB/1584, currentSize=0 B/0 for 88d78059a3705c06013145894aab6de7 in 441ms, sequenceid=10, compaction requested=false at 1732148808837 (+6 ms)Writing region close event to WAL at 1732148808838 (+1 ms)Running coprocessor post-close hooks at 1732148808845 (+7 ms)Closed at 1732148808846 (+1 ms) 2024-11-21T00:26:48,846 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148802039.88d78059a3705c06013145894aab6de7. 2024-11-21T00:26:48,871 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:26:48,985 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 1804, reset compression=false 2024-11-21T00:26:49,072 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:26:49,122 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/ns/e160929e17b946d09ec289e33d3a221e 2024-11-21T00:26:49,140 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/rep_barrier/9be657f6f9f84127943dd9b3fc32aa16 is 112, key is test,,1732148799122.d7dc5d9799b9dbfd0669e5e4e687e303./rep_barrier:seqnumDuringOpen/1732148799630/Put/seqid=0 2024-11-21T00:26:49,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741846_1022 (size=5518) 2024-11-21T00:26:49,244 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:49,272 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:26:49,272 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:26:49,272 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 
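The ERROR and abort in the stack traces above come from the replication shipper failing to persist its WAL offset: TableReplicationQueueStorage.setOffset writes the position into the hbase:replication table, but the server is already shutting down so the AsyncConnection's RPC client is stopped, the put fails with StoppedRpcClientException, and interruptOrAbortWhenFail escalates to a region server abort (the follow-up reportRSFatalError to the master fails the same way). A schematic of that persist-or-abort decision; the interfaces and names here are illustrative stand-ins, not the ReplicationSourceManager API:

interface OffsetStore {
  void setOffset(String queueId, String walGroup, long position) throws Exception;
}

class OffsetPersisterSketch {
  private final OffsetStore store;
  private final Runnable abortServer;   // e.g. asks the region server to abort

  OffsetPersisterSketch(OffsetStore store, Runnable abortServer) {
    this.store = store;
    this.abortServer = abortServer;
  }

  void persistOrAbort(String queueId, String walGroup, long position) {
    try {
      store.setOffset(queueId, walGroup, position);
    } catch (Exception e) {
      // If the offset cannot be persisted, continuing could silently lose the
      // replication position, so the server is aborted instead (as in the log).
      abortServer.run();
    }
  }
}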
2024-11-21T00:26:49,388 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 1804, reset compression=false 2024-11-21T00:26:49,472 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:26:49,543 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/rep_barrier/9be657f6f9f84127943dd9b3fc32aa16 2024-11-21T00:26:49,565 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/table/fcb4a7ede0114fa5ac0d6656171de11a is 53, key is hbase:replication/table:state/1732148803039/Put/seqid=0 2024-11-21T00:26:49,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741847_1023 (size=5308) 2024-11-21T00:26:49,673 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:26:49,847 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:49,873 DEBUG [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:26:49,898 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.1732148796994 to pos 1804, reset compression=false 2024-11-21T00:26:49,950 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
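The FlushAllLargeStoresPolicy line just below reflects the per-family flush decision: families whose memstores exceed a per-family threshold are flushed selectively, and when none do, all families are flushed together. A minimal sketch of that selection, with illustrative names rather than the actual policy class:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

class FlushPolicySketch {
  static Collection<String> selectFamiliesToFlush(Map<String, Long> familyDataSizes,
                                                  long perFamilyFlushThreshold) {
    List<String> large = new ArrayList<>();
    for (Map.Entry<String, Long> e : familyDataSizes.entrySet()) {
      if (e.getValue() >= perFamilyFlushThreshold) {
        large.add(e.getKey());
      }
    }
    // "Since none of the CFs were above the size, flushing all."
    return large.isEmpty() ? familyDataSizes.keySet() : large;
  }
}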
2024-11-21T00:26:49,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ee3431ada32b00d5fdbcd917d1fe42f7 3/3 column families, dataSize=865 B heapSize=2.25 KB 2024-11-21T00:26:49,974 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/table/fcb4a7ede0114fa5ac0d6656171de11a 2024-11-21T00:26:49,979 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/info/aa0fea9b0f2e4d05bbb3d1d2085d7181 as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/info/aa0fea9b0f2e4d05bbb3d1d2085d7181 2024-11-21T00:26:49,986 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/info/aa0fea9b0f2e4d05bbb3d1d2085d7181, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:26:49,987 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/ns/e160929e17b946d09ec289e33d3a221e as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/ns/e160929e17b946d09ec289e33d3a221e 2024-11-21T00:26:49,991 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/ns/e160929e17b946d09ec289e33d3a221e, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:26:49,992 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/rep_barrier/9be657f6f9f84127943dd9b3fc32aa16 as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/rep_barrier/9be657f6f9f84127943dd9b3fc32aa16 2024-11-21T00:26:49,997 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/rep_barrier/9be657f6f9f84127943dd9b3fc32aa16, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:26:49,998 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/.tmp/table/fcb4a7ede0114fa5ac0d6656171de11a as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/table/fcb4a7ede0114fa5ac0d6656171de11a 2024-11-21T00:26:50,003 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/table/fcb4a7ede0114fa5ac0d6656171de11a, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:26:50,005 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1734ms, sequenceid=16, compaction requested=false 2024-11-21T00:26:50,013 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:26:50,014 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:26:50,014 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:26:50,014 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:26:50,014 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148808271Running coprocessor pre-close hooks at 1732148808271Disabling compacts and flushes for region at 1732148808271Disabling writes for close at 1732148808271Obtaining lock to block concurrent updates at 1732148808271Preparing flush snapshotting stores in 1588230740 at 1732148808271Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148808271Flushing stores of hbase:meta,,1.1588230740 at 1732148808272 (+1 ms)Flushing 1588230740/info: creating writer at 1732148808272Flushing 1588230740/info: appending metadata at 1732148808288 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732148808288Flushing 1588230740/ns: creating writer at 1732148808699 (+411 ms)Flushing 1588230740/ns: appending metadata at 1732148808717 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732148808717Flushing 1588230740/rep_barrier: creating writer at 1732148809126 (+409 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148809139 (+13 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148809139Flushing 1588230740/table: creating writer at 1732148809547 (+408 ms)Flushing 1588230740/table: appending metadata at 1732148809565 (+18 ms)Flushing 1588230740/table: closing flushed file at 1732148809565Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fb718d9: reopening flushed file at 1732148809979 (+414 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bf3f6a1: reopening flushed file at 1732148809986 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d5616b9: reopening flushed file at 1732148809991 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15414f2d: reopening flushed file at 1732148809997 (+6 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 
KB/5600, currentSize=0 B/0 for 1588230740 in 1734ms, sequenceid=16, compaction requested=false at 1732148810005 (+8 ms)Writing region close event to WAL at 1732148810008 (+3 ms)Running coprocessor post-close hooks at 1732148810014 (+6 ms)Closed at 1732148810014 2024-11-21T00:26:50,014 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:26:50,073 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,38425,1732148794847; all regions closed. 2024-11-21T00:26:50,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:26:50,075 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/WALs/5ed4808ef0e6,38425,1732148794847/5ed4808ef0e6%2C38425%2C1732148794847.meta.1732148797641.meta not finished, retry = 0 2024-11-21T00:26:50,141 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:26:50,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741839_1015 (size=2676) 2024-11-21T00:26:50,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741832_1008 (size=1812) 2024-11-21T00:26:50,181 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:50,182 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:26:50,182 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:26:50,182 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:26:50,182 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:26:50,182 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:26:50,182 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,38425,1732148794847 because: Region server is closing 2024-11-21T00:26:50,182 INFO [RS:0;5ed4808ef0e6:38425 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:38425. 
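The ChoreService line above lists the periodic chores cancelled at shutdown (ReplicationSourceStatistics and ReplicationSinkStatistics every 300000 ms, CompactionThroughputTuner every 60000 ms). A generic sketch of that periodic-chore pattern using a plain ScheduledExecutorService rather than HBase's ChoreService:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class ChoreServiceSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
    chores.scheduleAtFixedRate(
        () -> System.out.println("ReplicationSourceStatistics tick"),
        0, 300_000, TimeUnit.MILLISECONDS);
    chores.scheduleAtFixedRate(
        () -> System.out.println("CompactionThroughputTuner tick"),
        0, 60_000, TimeUnit.MILLISECONDS);
    Thread.sleep(1_000);
    chores.shutdownNow();   // cancels pending chores, as in the shutdown log above
  }
}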
2024-11-21T00:26:50,183 DEBUG [RS:0;5ed4808ef0e6:38425 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:50,183 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:50,183 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:50,183 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
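The call stack above is HBase recording where AsyncConnectionImpl.close() was invoked (here from HBaseReplicationEndpoint.disconnect during replication-source teardown); once closed, the underlying RPC client stops and later calls fail fast, which is what triggered the StoppedRpcClientException earlier. In client code the same close happens through the public API; a minimal sketch, assuming a default local configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

class CloseAsyncConnectionSketch {
  public static void main(String[] args) throws Exception {
    AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get();
    try {
      // ... use conn.getTable(...) while the cluster is up ...
    } finally {
      conn.close();   // stops the underlying rpc client; subsequent RPCs fail fast
    }
  }
}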
2024-11-21T00:26:50,283 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.shipper5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 terminated 2024-11-21T00:26:50,283 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38425,1732148794847.replicationSource.wal-reader.5ed4808ef0e6%2C38425%2C1732148794847,1-5ed4808ef0e6,38425,1732148794847 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:26:50,283 INFO [RS:0;5ed4808ef0e6:38425 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:38425. 2024-11-21T00:26:50,283 DEBUG [RS:0;5ed4808ef0e6:38425 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:50,284 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:50,284 DEBUG [RS:0;5ed4808ef0e6:38425 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:50,284 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
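The WARN above is the expected stop path for the WAL reader: the thread sleeps between reads, shutdown interrupts that sleep, and the InterruptedException is logged before the worker exits. A generic sketch of an interruptible poll loop of this shape (not ReplicationSourceWALReader itself):

class InterruptibleReaderSketch implements Runnable {
  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        // poll for new WAL entries here ...
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();   // restore the flag and exit the loop
        break;
      }
    }
  }
}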
2024-11-21T00:26:50,284 INFO [RS:0;5ed4808ef0e6:38425 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38425 2024-11-21T00:26:50,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048/rs 2024-11-21T00:26:50,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-991210048/rs/5ed4808ef0e6,38425,1732148794847 2024-11-21T00:26:50,293 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:26:50,294 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,38425,1732148794847] 2024-11-21T00:26:50,314 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-991210048/draining/5ed4808ef0e6,38425,1732148794847 already deleted, retry=false 2024-11-21T00:26:50,314 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,38425,1732148794847 expired; onlineServers=0 2024-11-21T00:26:50,314 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,35843,1732148794477' ***** 2024-11-21T00:26:50,314 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:26:50,315 INFO [M:0;5ed4808ef0e6:35843 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:26:50,315 INFO [M:0;5ed4808ef0e6:35843 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:26:50,315 DEBUG [M:0;5ed4808ef0e6:35843 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:26:50,315 DEBUG [M:0;5ed4808ef0e6:35843 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:26:50,315 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
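[editor's note] The ZKWatcher entries above are HBase reacting to the region server's ephemeral znode under /1-991210048/rs disappearing, which RegionServerTracker treats as server expiration. A minimal sketch, using the plain Apache ZooKeeper client rather than HBase's internal ZKWatcher, of watching for the same kind of NodeDeleted event; the quorum address and znode path are copied from the log, and this is not code from this test:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);
    Watcher watcher = (WatchedEvent event) -> {
      // NodeDeleted fires when the ephemeral /.../rs/<server> znode goes away.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown();
      }
    };
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57893", 30_000, watcher);
    // exists() with watch=true registers the default watcher on the znode.
    zk.exists("/1-991210048/rs/5ed4808ef0e6,38425,1732148794847", true);
    deleted.await();
    zk.close();
  }
}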
2024-11-21T00:26:50,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148796635 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148796635,5,FailOnTimeoutGroup] 2024-11-21T00:26:50,315 INFO [M:0;5ed4808ef0e6:35843 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:26:50,315 INFO [M:0;5ed4808ef0e6:35843 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:26:50,315 DEBUG [M:0;5ed4808ef0e6:35843 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:26:50,315 INFO [M:0;5ed4808ef0e6:35843 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:26:50,315 INFO [M:0;5ed4808ef0e6:35843 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:26:50,315 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148796636 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148796636,5,FailOnTimeoutGroup] 2024-11-21T00:26:50,315 INFO [M:0;5ed4808ef0e6:35843 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:26:50,315 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:26:50,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-991210048/master 2024-11-21T00:26:50,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-991210048 2024-11-21T00:26:50,325 INFO [M:0;5ed4808ef0e6:35843 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/.lastflushedseqids 2024-11-21T00:26:50,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741848_1024 (size=245) 2024-11-21T00:26:50,335 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-991210048/master already deleted, retry=false 2024-11-21T00:26:50,335 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Failed delete of our master address node; KeeperErrorCode = NoNode for /1-991210048/master 2024-11-21T00:26:50,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:50,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38425-0x1015ac8e9d30004, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:50,404 INFO [RS:0;5ed4808ef0e6:38425 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:26:50,404 INFO [RS:0;5ed4808ef0e6:38425 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,38425,1732148794847; zookeeper connection closed. 
2024-11-21T00:26:50,408 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@392fc9a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@392fc9a 2024-11-21T00:26:50,408 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:26:50,550 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:50,730 INFO [M:0;5ed4808ef0e6:35843 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:26:50,730 INFO [M:0;5ed4808ef0e6:35843 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:26:50,730 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:26:50,730 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:50,730 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:50,730 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:26:50,730 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:26:50,731 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.50 KB heapSize=64.92 KB 2024-11-21T00:26:50,758 DEBUG [M:0;5ed4808ef0e6:35843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba7009dea9e64b5c99807ee7279e1505 is 82, key is hbase:meta,,1/info:regioninfo/1732148797793/Put/seqid=0 2024-11-21T00:26:50,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741849_1025 (size=5672) 2024-11-21T00:26:50,780 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba7009dea9e64b5c99807ee7279e1505 2024-11-21T00:26:50,812 DEBUG [M:0;5ed4808ef0e6:35843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b4296b755b4c4bb1af74ecab4104efce is 1480, key is \x00\x00\x00\x00\x00\x00\x00\x08/proc:d/1732148803044/Put/seqid=0 2024-11-21T00:26:50,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741850_1026 (size=8516) 2024-11-21T00:26:50,823 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=54.95 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b4296b755b4c4bb1af74ecab4104efce 2024-11-21T00:26:50,846 DEBUG [M:0;5ed4808ef0e6:35843 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/28478d6c82434ca68d084e77b298ee91 is 69, key is 5ed4808ef0e6,38425,1732148794847/rs:state/1732148796697/Put/seqid=0 2024-11-21T00:26:50,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741851_1027 (size=5156) 2024-11-21T00:26:50,860 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/28478d6c82434ca68d084e77b298ee91 2024-11-21T00:26:50,877 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ba7009dea9e64b5c99807ee7279e1505 as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba7009dea9e64b5c99807ee7279e1505 2024-11-21T00:26:50,883 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ba7009dea9e64b5c99807ee7279e1505, entries=8, sequenceid=97, filesize=5.5 K 2024-11-21T00:26:50,884 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b4296b755b4c4bb1af74ecab4104efce as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b4296b755b4c4bb1af74ecab4104efce 2024-11-21T00:26:50,897 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b4296b755b4c4bb1af74ecab4104efce, entries=11, sequenceid=97, filesize=8.3 K 2024-11-21T00:26:50,900 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/28478d6c82434ca68d084e77b298ee91 as hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/28478d6c82434ca68d084e77b298ee91 2024-11-21T00:26:50,907 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41951/user/jenkins/test-data/f958ec60-b556-c107-60fe-bbabd4070ce8/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/28478d6c82434ca68d084e77b298ee91, entries=1, sequenceid=97, filesize=5.0 K 2024-11-21T00:26:50,909 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.50 KB/56835, heapSize ~64.63 KB/66176, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 177ms, sequenceid=97, compaction requested=false 2024-11-21T00:26:50,913 INFO [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:50,914 DEBUG [M:0;5ed4808ef0e6:35843 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148810730Disabling compacts and flushes for region at 1732148810730Disabling writes for close at 1732148810730Obtaining lock to block concurrent updates at 1732148810731 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148810731Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=56835, getHeapSize=66416, getOffHeapSize=0, getCellsCount=114 at 1732148810731Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148810736 (+5 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148810736Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148810758 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148810758Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148810786 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148810811 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148810811Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148810829 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148810846 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148810846Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9b1e7ee: reopening flushed file at 1732148810876 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37a3aa02: reopening flushed file at 1732148810883 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@9bf580b: reopening flushed file at 1732148810897 (+14 ms)Finished flush of dataSize ~55.50 KB/56835, heapSize ~64.63 KB/66176, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 177ms, sequenceid=97, compaction requested=false at 1732148810909 (+12 ms)Writing region close event to WAL at 1732148810913 (+4 ms)Closed at 1732148810913 2024-11-21T00:26:50,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44701 is added to blk_1073741830_1006 (size=63654) 2024-11-21T00:26:50,922 INFO [M:0;5ed4808ef0e6:35843 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:26:50,922 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
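[editor's note] The flush journal above is the master's internal master:store region writing its info/proc/rs families out before close. For a user table the same memstore -> .tmp -> committed-store-file sequence can be requested explicitly through the Admin API; a hedged sketch, with the table name 'test' taken from this run and the rest placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to flush all memstores of 'test' to HFiles.
      admin.flush(TableName.valueOf("test"));
    }
  }
}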
2024-11-21T00:26:50,922 INFO [M:0;5ed4808ef0e6:35843 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35843 2024-11-21T00:26:50,922 INFO [M:0;5ed4808ef0e6:35843 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:26:51,076 INFO [M:0;5ed4808ef0e6:35843 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:26:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:51,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35843-0x1015ac8e9d30003, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:51,080 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54e75703{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:26:51,080 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e28b32{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:26:51,080 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:26:51,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ddcd1b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:26:51,081 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@393ac790{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.log.dir/,STOPPED} 2024-11-21T00:26:51,084 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:26:51,085 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:26:51,086 WARN [BP-1270601012-172.17.0.2-1732148791146 heartbeating to localhost/127.0.0.1:41951 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:26:51,086 WARN [BP-1270601012-172.17.0.2-1732148791146 heartbeating to localhost/127.0.0.1:41951 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1270601012-172.17.0.2-1732148791146 (Datanode Uuid be319525-6882-46f1-ac97-61595c0575c5) service to localhost/127.0.0.1:41951 2024-11-21T00:26:51,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/cluster_594054de-0935-d9f2-ef42-a01df030f7f4/data/data1/current/BP-1270601012-172.17.0.2-1732148791146 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:26:51,086 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/cluster_594054de-0935-d9f2-ef42-a01df030f7f4/data/data2/current/BP-1270601012-172.17.0.2-1732148791146 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:26:51,087 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:26:51,094 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4589e064{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:26:51,095 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@585a476f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:26:51,095 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:26:51,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e207ce7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:26:51,095 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@280b75e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.log.dir/,STOPPED} 2024-11-21T00:26:51,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:26:51,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:26:51,113 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:26:51,113 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication1(TestMasterReplication.java:167) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:51,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:51,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:51,113 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
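[editor's note] The call stack above ends in TestMasterReplication.testCyclicReplication1 -> shutDownMiniClusters -> HBaseTestingUtil.shutdownMiniCluster, i.e. the JUnit test tearing its mini cluster down. As a rough sketch only: shutdownMiniCluster() is confirmed by the trace, while startMiniCluster() and getConnection() are assumed by analogy with the older HBaseTestingUtility and may differ; a test using that utility looks roughly like this:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterSketchTest {

  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed start method, mirroring the shutdown call seen in the trace.
    util.startMiniCluster();
  }

  @Test
  public void testSomethingAgainstTheMiniCluster() throws Exception {
    // ... exercise the cluster, e.g. via util.getConnection() ...
  }

  @After
  public void tearDown() throws Exception {
    // This is the call whose stack appears in the log entry above.
    util.shutdownMiniCluster();
  }
}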
2024-11-21T00:26:51,113 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:26:51,113 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1950930886, stopped=false 2024-11-21T00:26:51,113 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,38531,1732148787674 2024-11-21T00:26:51,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01745974643/running 2024-11-21T00:26:51,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01745974643/running 2024-11-21T00:26:51,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:51,131 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:51,131 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:26:51,131 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on znode that does not yet exist, /01745974643/running 2024-11-21T00:26:51,132 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Set watcher on znode that does not yet exist, /01745974643/running 2024-11-21T00:26:51,136 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:26:51,136 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication1(TestMasterReplication.java:167) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:51,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:51,137 INFO [Time-limited test {}] regionserver.HRegionServer(2196): 
***** STOPPING region server '5ed4808ef0e6,43823,1732148788181' ***** 2024-11-21T00:26:51,137 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:26:51,137 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:26:51,137 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:26:51,137 INFO [RS:0;5ed4808ef0e6:43823 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:26:51,137 INFO [RS:0;5ed4808ef0e6:43823 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:26:51,137 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(3091): Received CLOSE for d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(3091): Received CLOSE for a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d26e6ab2bb8224b29ce4a3ac053d4a44, disabling compactions & flushes 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:43823. 2024-11-21T00:26:51,138 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 
2024-11-21T00:26:51,138 DEBUG [RS:0;5ed4808ef0e6:43823 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. after waiting 0 ms 2024-11-21T00:26:51,138 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:26:51,138 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d26e6ab2bb8224b29ce4a3ac053d4a44 3/3 column families, dataSize=892 B heapSize=2.06 KB 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:26:51,138 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:26:51,138 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1325): Online Regions={d26e6ab2bb8224b29ce4a3ac053d4a44=hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44., 1588230740=hbase:meta,,1.1588230740, a6e31f0ed205e4fb314ad9036d0360ce=test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce.} 2024-11-21T00:26:51,138 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a6e31f0ed205e4fb314ad9036d0360ce, d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:26:51,138 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:26:51,138 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:26:51,139 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:26:51,155 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:26:51,160 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44/.tmp/queue/fb2a2f3a9d064757bd76de8200680b90 is 153, key is 1-5ed4808ef0e6,43823,1732148788181/queue:5ed4808ef0e6%2C43823%2C1732148788181/1732148807817/Put/seqid=0 2024-11-21T00:26:51,168 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/info/6246e443702840c887a0b6b982f8d98d is 147, key is hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44./info:regioninfo/1732148800754/Put/seqid=0 2024-11-21T00:26:51,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741840_1016 (size=5352) 2024-11-21T00:26:51,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741841_1017 (size=7686) 2024-11-21T00:26:51,338 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a6e31f0ed205e4fb314ad9036d0360ce, d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:51,354 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:51,539 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a6e31f0ed205e4fb314ad9036d0360ce, d26e6ab2bb8224b29ce4a3ac053d4a44 2024-11-21T00:26:51,592 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=892 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44/.tmp/queue/fb2a2f3a9d064757bd76de8200680b90 2024-11-21T00:26:51,602 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44/.tmp/queue/fb2a2f3a9d064757bd76de8200680b90 as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44/queue/fb2a2f3a9d064757bd76de8200680b90 2024-11-21T00:26:51,606 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/info/6246e443702840c887a0b6b982f8d98d 2024-11-21T00:26:51,608 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44/queue/fb2a2f3a9d064757bd76de8200680b90, entries=1, sequenceid=10, filesize=5.2 K 2024-11-21T00:26:51,609 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~892 B/892, heapSize ~1.55 KB/1584, currentSize=0 B/0 for d26e6ab2bb8224b29ce4a3ac053d4a44 in 471ms, sequenceid=10, compaction requested=false 2024-11-21T00:26:51,609 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:26:51,618 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/replication/d26e6ab2bb8224b29ce4a3ac053d4a44/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=1 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop 
coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:26:51,619 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d26e6ab2bb8224b29ce4a3ac053d4a44: Waiting for close lock at 1732148811138Running coprocessor pre-close hooks at 1732148811138Disabling compacts and flushes for region at 1732148811138Disabling writes for close at 1732148811138Obtaining lock to block concurrent updates at 1732148811138Preparing flush snapshotting stores in d26e6ab2bb8224b29ce4a3ac053d4a44 at 1732148811138Finished memstore snapshotting hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44., syncing WAL and waiting on mvcc, flushsize=dataSize=892, getHeapSize=2064, getOffHeapSize=0, getCellsCount=6 at 1732148811138Flushing stores of hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. at 1732148811139 (+1 ms)Flushing d26e6ab2bb8224b29ce4a3ac053d4a44/queue: creating writer at 1732148811139Flushing d26e6ab2bb8224b29ce4a3ac053d4a44/queue: appending metadata at 1732148811160 (+21 ms)Flushing d26e6ab2bb8224b29ce4a3ac053d4a44/queue: closing flushed file at 1732148811160Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bae6986: reopening flushed file at 1732148811602 (+442 ms)Finished flush of dataSize ~892 B/892, heapSize ~1.55 KB/1584, currentSize=0 B/0 for d26e6ab2bb8224b29ce4a3ac053d4a44 in 471ms, sequenceid=10, compaction requested=false at 1732148811609 (+7 ms)Writing region close event to WAL at 1732148811614 (+5 ms)Running coprocessor post-close hooks at 1732148811618 (+4 ms)Closed at 1732148811619 (+1 ms) 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148799838.d26e6ab2bb8224b29ce4a3ac053d4a44. 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a6e31f0ed205e4fb314ad9036d0360ce, disabling compactions & flushes 2024-11-21T00:26:51,619 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. after waiting 0 ms 2024-11-21T00:26:51,619 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 
2024-11-21T00:26:51,619 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a6e31f0ed205e4fb314ad9036d0360ce 3/3 column families, dataSize=191 B heapSize=1.46 KB 2024-11-21T00:26:51,628 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/ns/b90a210d72214c3abc1da92fa0888d78 is 43, key is default/ns:d/1732148790847/Put/seqid=0 2024-11-21T00:26:51,639 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/f/064d57414cd54e4aa7606622664d8d22 is 29, key is row1/f:/1732148807235/DeleteFamily/seqid=0 2024-11-21T00:26:51,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741842_1018 (size=5153) 2024-11-21T00:26:51,643 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/ns/b90a210d72214c3abc1da92fa0888d78 2024-11-21T00:26:51,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741843_1019 (size=5118) 2024-11-21T00:26:51,646 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=112 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/f/064d57414cd54e4aa7606622664d8d22 2024-11-21T00:26:51,650 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 064d57414cd54e4aa7606622664d8d22 2024-11-21T00:26:51,664 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/rep_barrier/422a5a62e8a94bddbe6407f79597d62e is 112, key is test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce./rep_barrier:seqnumDuringOpen/1732148798981/Put/seqid=0 2024-11-21T00:26:51,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741844_1020 (size=5518) 2024-11-21T00:26:51,674 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/f1/0b3461f849184c76bd0098d4fa78292b is 30, key is row1/f1:/1732148807235/DeleteFamily/seqid=0 2024-11-21T00:26:51,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741845_1021 (size=5123) 2024-11-21T00:26:51,678 
INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/f1/0b3461f849184c76bd0098d4fa78292b 2024-11-21T00:26:51,684 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b3461f849184c76bd0098d4fa78292b 2024-11-21T00:26:51,704 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/norep/618a6ca1fe8a40689b8e80cb7f98d818 is 32, key is row/norep:/1732148806225/DeleteFamily/seqid=0 2024-11-21T00:26:51,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741846_1022 (size=5101) 2024-11-21T00:26:51,709 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/norep/618a6ca1fe8a40689b8e80cb7f98d818 2024-11-21T00:26:51,717 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 618a6ca1fe8a40689b8e80cb7f98d818 2024-11-21T00:26:51,718 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/f/064d57414cd54e4aa7606622664d8d22 as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/f/064d57414cd54e4aa7606622664d8d22 2024-11-21T00:26:51,725 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 064d57414cd54e4aa7606622664d8d22 2024-11-21T00:26:51,725 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/f/064d57414cd54e4aa7606622664d8d22, entries=2, sequenceid=8, filesize=5.0 K 2024-11-21T00:26:51,725 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:26:51,725 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:26:51,726 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/f1/0b3461f849184c76bd0098d4fa78292b as 
hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/f1/0b3461f849184c76bd0098d4fa78292b 2024-11-21T00:26:51,733 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b3461f849184c76bd0098d4fa78292b 2024-11-21T00:26:51,733 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/f1/0b3461f849184c76bd0098d4fa78292b, entries=2, sequenceid=8, filesize=5.0 K 2024-11-21T00:26:51,734 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/.tmp/norep/618a6ca1fe8a40689b8e80cb7f98d818 as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/norep/618a6ca1fe8a40689b8e80cb7f98d818 2024-11-21T00:26:51,739 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a6e31f0ed205e4fb314ad9036d0360ce 2024-11-21T00:26:51,740 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 618a6ca1fe8a40689b8e80cb7f98d818 2024-11-21T00:26:51,741 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/norep/618a6ca1fe8a40689b8e80cb7f98d818, entries=1, sequenceid=8, filesize=5.0 K 2024-11-21T00:26:51,742 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~191 B/191, heapSize ~1.41 KB/1448, currentSize=0 B/0 for a6e31f0ed205e4fb314ad9036d0360ce in 123ms, sequenceid=8, compaction requested=false 2024-11-21T00:26:51,742 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:26:51,765 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/default/test/a6e31f0ed205e4fb314ad9036d0360ce/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-21T00:26:51,766 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:26:51,766 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:26:51,766 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 
2024-11-21T00:26:51,766 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a6e31f0ed205e4fb314ad9036d0360ce: Waiting for close lock at 1732148811619Running coprocessor pre-close hooks at 1732148811619Disabling compacts and flushes for region at 1732148811619Disabling writes for close at 1732148811619Obtaining lock to block concurrent updates at 1732148811619Preparing flush snapshotting stores in a6e31f0ed205e4fb314ad9036d0360ce at 1732148811619Finished memstore snapshotting test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce., syncing WAL and waiting on mvcc, flushsize=dataSize=191, getHeapSize=1448, getOffHeapSize=0, getCellsCount=7 at 1732148811620 (+1 ms)Flushing stores of test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. at 1732148811620Flushing a6e31f0ed205e4fb314ad9036d0360ce/f: creating writer at 1732148811621 (+1 ms)Flushing a6e31f0ed205e4fb314ad9036d0360ce/f: appending metadata at 1732148811639 (+18 ms)Flushing a6e31f0ed205e4fb314ad9036d0360ce/f: closing flushed file at 1732148811639Flushing a6e31f0ed205e4fb314ad9036d0360ce/f1: creating writer at 1732148811650 (+11 ms)Flushing a6e31f0ed205e4fb314ad9036d0360ce/f1: appending metadata at 1732148811673 (+23 ms)Flushing a6e31f0ed205e4fb314ad9036d0360ce/f1: closing flushed file at 1732148811673Flushing a6e31f0ed205e4fb314ad9036d0360ce/norep: creating writer at 1732148811684 (+11 ms)Flushing a6e31f0ed205e4fb314ad9036d0360ce/norep: appending metadata at 1732148811703 (+19 ms)Flushing a6e31f0ed205e4fb314ad9036d0360ce/norep: closing flushed file at 1732148811704 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22431e47: reopening flushed file at 1732148811717 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15bb4353: reopening flushed file at 1732148811725 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@75c921c5: reopening flushed file at 1732148811733 (+8 ms)Finished flush of dataSize ~191 B/191, heapSize ~1.41 KB/1448, currentSize=0 B/0 for a6e31f0ed205e4fb314ad9036d0360ce in 123ms, sequenceid=8, compaction requested=false at 1732148811742 (+9 ms)Writing region close event to WAL at 1732148811758 (+16 ms)Running coprocessor post-close hooks at 1732148811766 (+8 ms)Closed at 1732148811766 2024-11-21T00:26:51,766 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148797974.a6e31f0ed205e4fb314ad9036d0360ce. 
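[editor's note] The cells flushed for table 'test' above are DeleteFamily markers (row1/f:/.../DeleteFamily and the like), meaning the test issued family-wide deletes before shutdown. A family-wide delete like that is produced with the standard client API; a sketch with the table, row and family names copied from the log and everything else a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteFamilySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("test"))) {
      // addFamily() writes a DeleteFamily marker, the kind of cell that was
      // flushed into the f and f1 store files in the entries above.
      Delete delete = new Delete(Bytes.toBytes("row1"));
      delete.addFamily(Bytes.toBytes("f"));
      delete.addFamily(Bytes.toBytes("f1"));
      table.delete(delete);
    }
  }
}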
2024-11-21T00:26:51,940 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:26:52,075 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/rep_barrier/422a5a62e8a94bddbe6407f79597d62e 2024-11-21T00:26:52,099 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/table/2f1841b14e9c423c861110195d9a3a4f is 53, key is hbase:replication/table:state/1732148800764/Put/seqid=0 2024-11-21T00:26:52,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741847_1023 (size=5308) 2024-11-21T00:26:52,107 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/table/2f1841b14e9c423c861110195d9a3a4f 2024-11-21T00:26:52,112 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/info/6246e443702840c887a0b6b982f8d98d as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/info/6246e443702840c887a0b6b982f8d98d 2024-11-21T00:26:52,117 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/info/6246e443702840c887a0b6b982f8d98d, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:26:52,118 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/ns/b90a210d72214c3abc1da92fa0888d78 as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/ns/b90a210d72214c3abc1da92fa0888d78 2024-11-21T00:26:52,126 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/ns/b90a210d72214c3abc1da92fa0888d78, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:26:52,127 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/rep_barrier/422a5a62e8a94bddbe6407f79597d62e as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/rep_barrier/422a5a62e8a94bddbe6407f79597d62e 2024-11-21T00:26:52,132 INFO 
[RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/rep_barrier/422a5a62e8a94bddbe6407f79597d62e, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:26:52,133 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/.tmp/table/2f1841b14e9c423c861110195d9a3a4f as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/table/2f1841b14e9c423c861110195d9a3a4f 2024-11-21T00:26:52,137 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/table/2f1841b14e9c423c861110195d9a3a4f, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:26:52,138 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1000ms, sequenceid=16, compaction requested=false 2024-11-21T00:26:52,140 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:26:52,140 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:26:52,140 DEBUG [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:26:52,143 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:26:52,143 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:26:52,143 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:26:52,143 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:26:52,144 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148811138Running coprocessor pre-close hooks at 1732148811138Disabling compacts and flushes for region at 1732148811138Disabling writes for close at 1732148811138Obtaining lock to block concurrent updates at 1732148811139 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732148811139Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148811139Flushing stores of hbase:meta,,1.1588230740 at 1732148811139Flushing 1588230740/info: creating writer at 1732148811139Flushing 1588230740/info: appending metadata at 
1732148811168 (+29 ms)Flushing 1588230740/info: closing flushed file at 1732148811168Flushing 1588230740/ns: creating writer at 1732148811610 (+442 ms)Flushing 1588230740/ns: appending metadata at 1732148811628 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732148811628Flushing 1588230740/rep_barrier: creating writer at 1732148811647 (+19 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148811664 (+17 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148811664Flushing 1588230740/table: creating writer at 1732148812081 (+417 ms)Flushing 1588230740/table: appending metadata at 1732148812098 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732148812098Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@617bf9b5: reopening flushed file at 1732148812111 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5033c3c4: reopening flushed file at 1732148812118 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6e297b0b: reopening flushed file at 1732148812126 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1765c556: reopening flushed file at 1732148812132 (+6 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1000ms, sequenceid=16, compaction requested=false at 1732148812138 (+6 ms)Writing region close event to WAL at 1732148812140 (+2 ms)Running coprocessor post-close hooks at 1732148812143 (+3 ms)Closed at 1732148812143 2024-11-21T00:26:52,144 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:26:52,258 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 914, reset compression=false 2024-11-21T00:26:52,262 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:26:52,263 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804 to pos 1805, reset compression=false 2024-11-21T00:26:52,263 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43823,1732148788181 got entry batch from reader: WALEntryBatch [walEntries=[], 
lastWalPath=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/WALs/5ed4808ef0e6,43823,1732148788181/5ed4808ef0e6%2C43823%2C1732148788181.1732148789804, lastWalPosition=1805, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:26:52,263 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,43823,1732148788181: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,43823,1732148788181, walGroup=5ed4808ef0e6%2C43823%2C1732148788181, offset=5ed4808ef0e6%2C43823%2C1732148788181.1732148789804:1805, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:43823 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) 
~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 
7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:26:52,265 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:26:52,265 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:26:52,265 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:26:52,265 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:26:52,265 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:26:52,265 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1109393408, 
"init": 1048576000, "max": 2306867200, "used": 807057424 }, "NonHeapMemoryUsage": { "committed": 198115328, "init": 7667712, "max": -1, "used": 195242888 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:26:52,265 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:38531 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:38531 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-21T00:26:52,340 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,43823,1732148788181; all regions closed. 
2024-11-21T00:26:52,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:26:52,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741839_1015 (size=2676) 2024-11-21T00:26:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741833_1009 (size=1813) 2024-11-21T00:26:52,346 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:52,346 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:26:52,347 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:26:52,347 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:26:52,347 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:26:52,347 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:26:52,347 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,43823,1732148788181 because: Region server is closing 2024-11-21T00:26:52,347 INFO [RS:0;5ed4808ef0e6:43823 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:43823. 
2024-11-21T00:26:52,347 DEBUG [RS:0;5ed4808ef0e6:43823 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:52,347 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:52,347 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:52,347 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:26:52,448 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.shipper5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 terminated 2024-11-21T00:26:52,448 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43823,1732148788181.replicationSource.wal-reader.5ed4808ef0e6%2C43823%2C1732148788181,1-5ed4808ef0e6,43823,1732148788181 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:26:52,448 INFO [RS:0;5ed4808ef0e6:43823 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:43823. 2024-11-21T00:26:52,448 DEBUG [RS:0;5ed4808ef0e6:43823 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:26:52,448 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:52,448 DEBUG [RS:0;5ed4808ef0e6:43823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:26:52,448 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:26:52,448 INFO [RS:0;5ed4808ef0e6:43823 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43823 2024-11-21T00:26:52,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643/rs 2024-11-21T00:26:52,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01745974643/rs/5ed4808ef0e6,43823,1732148788181 2024-11-21T00:26:52,499 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:26:52,500 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,43823,1732148788181] 2024-11-21T00:26:52,520 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /01745974643/draining/5ed4808ef0e6,43823,1732148788181 already deleted, retry=false 2024-11-21T00:26:52,520 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,43823,1732148788181 expired; onlineServers=0 2024-11-21T00:26:52,520 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,38531,1732148787674' ***** 2024-11-21T00:26:52,520 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:26:52,520 INFO [M:0;5ed4808ef0e6:38531 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:26:52,520 INFO [M:0;5ed4808ef0e6:38531 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:26:52,520 DEBUG [M:0;5ed4808ef0e6:38531 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:26:52,520 DEBUG [M:0;5ed4808ef0e6:38531 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:26:52,520 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:26:52,520 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148789317 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148789317,5,FailOnTimeoutGroup] 2024-11-21T00:26:52,520 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148789303 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148789303,5,FailOnTimeoutGroup] 2024-11-21T00:26:52,521 INFO [M:0;5ed4808ef0e6:38531 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:26:52,521 INFO [M:0;5ed4808ef0e6:38531 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:26:52,521 DEBUG [M:0;5ed4808ef0e6:38531 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:26:52,521 INFO [M:0;5ed4808ef0e6:38531 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:26:52,521 INFO [M:0;5ed4808ef0e6:38531 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:26:52,521 INFO [M:0;5ed4808ef0e6:38531 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:26:52,521 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:26:52,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01745974643/master 2024-11-21T00:26:52,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01745974643 2024-11-21T00:26:52,531 DEBUG [M:0;5ed4808ef0e6:38531 {}] zookeeper.ZKUtil(347): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Unable to get data of znode /01745974643/master because node does not exist (not an error) 2024-11-21T00:26:52,531 WARN [M:0;5ed4808ef0e6:38531 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:26:52,532 INFO [M:0;5ed4808ef0e6:38531 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/.lastflushedseqids 2024-11-21T00:26:52,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741848_1024 (size=245) 2024-11-21T00:26:52,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:52,610 INFO [RS:0;5ed4808ef0e6:43823 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:26:52,610 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43823-0x1015ac8e9d30001, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:52,610 INFO [RS:0;5ed4808ef0e6:43823 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,43823,1732148788181; zookeeper 
connection closed. 2024-11-21T00:26:52,610 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@68914e13 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@68914e13 2024-11-21T00:26:52,610 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:26:52,936 INFO [M:0;5ed4808ef0e6:38531 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:26:52,936 INFO [M:0;5ed4808ef0e6:38531 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:26:52,936 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:26:52,936 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:52,936 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:52,936 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:26:52,936 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:52,936 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.50 KB heapSize=64.92 KB 2024-11-21T00:26:52,951 DEBUG [M:0;5ed4808ef0e6:38531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3014b08290f7497588da8e85596ffcaf is 82, key is hbase:meta,,1/info:regioninfo/1732148790589/Put/seqid=0 2024-11-21T00:26:52,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741849_1025 (size=5672) 2024-11-21T00:26:52,958 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3014b08290f7497588da8e85596ffcaf 2024-11-21T00:26:52,983 DEBUG [M:0;5ed4808ef0e6:38531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5c297cf5f2648498cb9f6b1395abdd5 is 1480, key is \x00\x00\x00\x00\x00\x00\x00\x08/proc:d/1732148800770/Put/seqid=0 2024-11-21T00:26:52,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741850_1026 (size=8516) 2024-11-21T00:26:53,390 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=54.95 KB at sequenceid=97 (bloomFilter=true), 
to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5c297cf5f2648498cb9f6b1395abdd5 2024-11-21T00:26:53,407 DEBUG [M:0;5ed4808ef0e6:38531 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1adb0c47f71f47bc9e528168cc355372 is 69, key is 5ed4808ef0e6,43823,1732148788181/rs:state/1732148789505/Put/seqid=0 2024-11-21T00:26:53,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741851_1027 (size=5156) 2024-11-21T00:26:53,411 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1adb0c47f71f47bc9e528168cc355372 2024-11-21T00:26:53,415 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3014b08290f7497588da8e85596ffcaf as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3014b08290f7497588da8e85596ffcaf 2024-11-21T00:26:53,419 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3014b08290f7497588da8e85596ffcaf, entries=8, sequenceid=97, filesize=5.5 K 2024-11-21T00:26:53,420 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d5c297cf5f2648498cb9f6b1395abdd5 as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5c297cf5f2648498cb9f6b1395abdd5 2024-11-21T00:26:53,424 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d5c297cf5f2648498cb9f6b1395abdd5, entries=11, sequenceid=97, filesize=8.3 K 2024-11-21T00:26:53,425 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1adb0c47f71f47bc9e528168cc355372 as hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1adb0c47f71f47bc9e528168cc355372 2024-11-21T00:26:53,430 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41751/user/jenkins/test-data/43130dcd-3c8a-4f85-29eb-4f033b4a39b4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1adb0c47f71f47bc9e528168cc355372, entries=1, sequenceid=97, filesize=5.0 K 
2024-11-21T00:26:53,431 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.50 KB/56835, heapSize ~64.63 KB/66176, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 495ms, sequenceid=97, compaction requested=false 2024-11-21T00:26:53,432 INFO [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:26:53,432 DEBUG [M:0;5ed4808ef0e6:38531 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148812936Disabling compacts and flushes for region at 1732148812936Disabling writes for close at 1732148812936Obtaining lock to block concurrent updates at 1732148812936Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148812936Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=56835, getHeapSize=66416, getOffHeapSize=0, getCellsCount=114 at 1732148812937 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148812937Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148812937Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148812951 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148812951Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148812968 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148812983 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148812983Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148813393 (+410 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148813407 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148813407Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@790777f: reopening flushed file at 1732148813414 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60321188: reopening flushed file at 1732148813419 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@257e6fc: reopening flushed file at 1732148813424 (+5 ms)Finished flush of dataSize ~55.50 KB/56835, heapSize ~64.63 KB/66176, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 495ms, sequenceid=97, compaction requested=false at 1732148813431 (+7 ms)Writing region close event to WAL at 1732148813432 (+1 ms)Closed at 1732148813432 2024-11-21T00:26:53,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44835 is added to blk_1073741830_1006 (size=63654) 2024-11-21T00:26:53,434 INFO [M:0;5ed4808ef0e6:38531 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:26:53,434 INFO [M:0;5ed4808ef0e6:38531 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38531 2024-11-21T00:26:53,434 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:26:53,436 INFO [M:0;5ed4808ef0e6:38531 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:26:53,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:53,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38531-0x1015ac8e9d30000, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:26:53,592 INFO [M:0;5ed4808ef0e6:38531 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:26:53,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7017af6e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:26:53,604 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@b1a67c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:26:53,604 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:26:53,604 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c0d6b7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:26:53,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@147350c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.log.dir/,STOPPED} 2024-11-21T00:26:53,612 WARN [BP-1178125728-172.17.0.2-1732148784711 heartbeating to localhost/127.0.0.1:41751 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:26:53,612 WARN [BP-1178125728-172.17.0.2-1732148784711 heartbeating to localhost/127.0.0.1:41751 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1178125728-172.17.0.2-1732148784711 (Datanode Uuid bd921b04-f6f2-4edf-972b-a6794fdfc30e) service to localhost/127.0.0.1:41751 2024-11-21T00:26:53,612 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:26:53,612 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:26:53,612 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb/data/data1/current/BP-1178125728-172.17.0.2-1732148784711 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:26:53,613 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/cluster_ee0e2d9b-5928-02ee-d187-4ac9bf1cbfeb/data/data2/current/BP-1178125728-172.17.0.2-1732148784711 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:26:53,630 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:26:53,635 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@29bd1fb9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:26:53,636 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6006c435{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:26:53,636 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:26:53,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d76cdd7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:26:53,636 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f5049d8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/8f4eaf6b-2c5b-40a3-39ff-11e0d621c496/hadoop.log.dir/,STOPPED} 2024-11-21T00:26:53,641 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:26:53,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:26:53,673 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testCyclicReplication1 Thread=495 (was 438) Potentially hanging thread: nioEventLoopGroup-22-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41951 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.13@localhost:41951 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-22-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41751 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:57893) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: nioEventLoopGroup-23-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:57893) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: HMaster-EventLoopGroup-29-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-22-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-29-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-28-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-30-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41751 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-29-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-28-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-23-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41751 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-30-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41751 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41751 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41951 from jenkins.hfs.13 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-27-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41951 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41951 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-27-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41951 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-23-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-28-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41751 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.12@localhost:41751 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-27-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41751 from jenkins.hfs.12 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-30-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 718) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=825 (was 645) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1350 (was 1878) 2024-11-21T00:26:53,689 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testCyclicReplication2 Thread=495, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=825, ProcessCount=11, AvailableMemoryMB=1350 2024-11-21T00:26:53,707 INFO [Time-limited test {}] replication.TestMasterReplication(246): testCyclicReplication2 2024-11-21T00:26:53,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.log.dir so I do NOT create it in target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8 2024-11-21T00:26:53,708 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:26:53,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.tmp.dir so I do NOT create it in target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8 2024-11-21T00:26:53,708 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/81b95852-57e7-bc40-6da5-a1e0ddb59a9f/hadoop.tmp.dir Erasing configuration value by system value. 
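[editor's sketch] The ResourceChecker dump that ends above lists every candidate "potentially hanging" thread with its stack so leaks stand out between the before/after thread counts (495 was 438). A rough JDK-only sketch of the same idea; the daemon/name filter is an invented heuristic, not HBase's actual ResourceChecker logic.

import java.util.Map;

/** Dumps threads that might be lingering after a test, in the spirit of the
 *  ResourceChecker output above. The selection heuristic here is illustrative only. */
public final class ThreadDumpSketch {
    public static void main(String[] args) {
        for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
            Thread t = e.getKey();
            // Invented heuristic: report non-daemon threads and netty event-loop workers.
            if (!t.isDaemon() || t.getName().startsWith("nioEventLoopGroup")) {
                System.out.println("Potentially hanging thread: " + t.getName());
                for (StackTraceElement frame : e.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }
}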
2024-11-21T00:26:53,708 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8 2024-11-21T00:26:53,708 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6, deleteOnExit=true 2024-11-21T00:26:53,711 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6/zookeeper_0, clientPort=50082, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:26:53,712 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50082 2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/test.cache.data in system properties and HBase conf 2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/mapreduce.cluster.temp.dir in system properties and HBase conf 
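[editor's sketch] The minicluster start above is driven by a StartMiniClusterOption with one master, one region server, one datanode, and one ZK server. A hedged sketch of the test-side call that would produce such a log, assuming the builder methods mirror the option fields printed in the log entry; verify the exact API against the HBase version on the classpath.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

/** Hedged sketch: start and stop a mini HBase cluster shaped like the one in the log
 *  (numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1). Builder method
 *  names are assumed from the option fields printed above. */
public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // brings up mini ZK, mini DFS, and HBase, as logged above
        try {
            // ... exercise the cluster here ...
        } finally {
            util.shutdownMiniCluster();  // produces the "Minicluster is down" entry seen earlier
        }
    }
}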
2024-11-21T00:26:53,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:26:53,712 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/nfs.dump.dir in system 
properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:26:53,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:26:53,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015ac8e9d30002, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:26:53,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015ac8e9d30005, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:26:53,739 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015ac8e9d30002, quorum=127.0.0.1:57893, baseZNode=/01745974643 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:26:53,739 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015ac8e9d30005, quorum=127.0.0.1:57893, baseZNode=/1-991210048 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:26:53,898 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:26:53,899 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:53,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:26:54,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:54,175 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:26:54,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:26:54,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:26:54,197 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:26:54,197 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:54,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b7d156c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:26:54,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64fb5b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:26:54,310 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@754f6ac0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/java.io.tmpdir/jetty-localhost-44405-hadoop-hdfs-3_4_1-tests_jar-_-any-7991361473225470765/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:26:54,310 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@661cc5d6{HTTP/1.1, (http/1.1)}{localhost:44405} 2024-11-21T00:26:54,310 INFO [Time-limited test {}] server.Server(415): Started @548860ms 2024-11-21T00:26:54,458 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:26:54,631 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use 
random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:26:54,639 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:26:54,648 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:26:54,648 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:26:54,648 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:26:54,649 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68ca5029{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:26:54,649 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f29cd30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:26:54,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@396e52d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/java.io.tmpdir/jetty-localhost-35753-hadoop-hdfs-3_4_1-tests_jar-_-any-4049644988605381141/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:26:54,769 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a7e3235{HTTP/1.1, (http/1.1)}{localhost:35753} 2024-11-21T00:26:54,769 INFO [Time-limited test {}] server.Server(415): Started @549318ms 2024-11-21T00:26:54,770 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
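[editor's sketch] The HBaseTestingUtil entries above redirect hadoop.log.dir, hadoop.tmp.dir, test.cache.data, and similar properties into a per-test directory under target/test-data so parallel test runs do not collide on shared paths. A minimal JDK-only sketch of that redirection; the directory layout and property subset are illustrative.

import java.nio.file.Files;
import java.nio.file.Path;

/** Points a few Hadoop/HBase test properties at a fresh per-test directory,
 *  mirroring the "Setting X to ... in system properties and HBase conf" entries above. */
public final class TestDirSetup {
    public static void main(String[] args) throws Exception {
        Path base = Files.createTempDirectory("test-data-");
        System.setProperty("hadoop.log.dir", base.resolve("hadoop.log.dir").toString());
        System.setProperty("hadoop.tmp.dir", base.resolve("hadoop.tmp.dir").toString());
        System.setProperty("test.cache.data", base.resolve("test.cache.data").toString());
        System.out.println("Test data rooted at " + base);
    }
}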
2024-11-21T00:26:55,676 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:04,080 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:05,237 WARN [Thread-2705 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6/data/data1/current/BP-1035257430-172.17.0.2-1732148813734/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:05,247 WARN [Thread-2706 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6/data/data2/current/BP-1035257430-172.17.0.2-1732148813734/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:05,337 WARN [Thread-2691 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T00:27:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x591834c85f4bc2ca with lease ID 0xd3f5fb46eefa4033: Processing first storage report for DS-7ff55e78-ba20-4558-ada4-8cb39555b95f from datanode DatanodeRegistration(127.0.0.1:33273, datanodeUuid=9f5180f9-d8d7-46f9-8cc7-2da3c71c59fa, infoPort=42655, infoSecurePort=0, ipcPort=42425, storageInfo=lv=-57;cid=testClusterID;nsid=1398945854;c=1732148813734) 2024-11-21T00:27:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x591834c85f4bc2ca with lease ID 0xd3f5fb46eefa4033: from storage DS-7ff55e78-ba20-4558-ada4-8cb39555b95f node DatanodeRegistration(127.0.0.1:33273, datanodeUuid=9f5180f9-d8d7-46f9-8cc7-2da3c71c59fa, infoPort=42655, infoSecurePort=0, ipcPort=42425, storageInfo=lv=-57;cid=testClusterID;nsid=1398945854;c=1732148813734), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:27:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x591834c85f4bc2ca with lease ID 0xd3f5fb46eefa4033: Processing first storage report for DS-aef8fd82-297f-4957-a01e-2b62f8ad1481 from datanode DatanodeRegistration(127.0.0.1:33273, datanodeUuid=9f5180f9-d8d7-46f9-8cc7-2da3c71c59fa, infoPort=42655, infoSecurePort=0, ipcPort=42425, storageInfo=lv=-57;cid=testClusterID;nsid=1398945854;c=1732148813734) 2024-11-21T00:27:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x591834c85f4bc2ca with lease ID 0xd3f5fb46eefa4033: from storage DS-aef8fd82-297f-4957-a01e-2b62f8ad1481 node DatanodeRegistration(127.0.0.1:33273, datanodeUuid=9f5180f9-d8d7-46f9-8cc7-2da3c71c59fa, infoPort=42655, infoSecurePort=0, ipcPort=42425, storageInfo=lv=-57;cid=testClusterID;nsid=1398945854;c=1732148813734), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:27:05,453 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8 2024-11-21T00:27:05,453 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:05,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:05,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:27:05,924 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b with version=8 2024-11-21T00:27:05,924 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/hbase-staging 2024-11-21T00:27:05,926 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:05,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:05,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:05,926 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:05,926 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:05,927 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:05,927 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:27:05,927 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:05,931 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42819 2024-11-21T00:27:05,932 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42819 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:06,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428190x0, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:06,060 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42819-0x1015ac95b3a0000 connected 2024-11-21T00:27:06,269 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:06,270 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:06,288 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on znode that does not yet exist, /01148209107/running 2024-11-21T00:27:06,289 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b, hbase.cluster.distributed=false 2024-11-21T00:27:06,290 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on znode that does not yet exist, /01148209107/acl 2024-11-21T00:27:06,332 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42819 2024-11-21T00:27:06,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42819 2024-11-21T00:27:06,364 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=42819 2024-11-21T00:27:06,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42819 2024-11-21T00:27:06,391 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42819 2024-11-21T00:27:06,421 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:06,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:06,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:06,421 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:06,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:06,421 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:06,421 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:27:06,421 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:06,449 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33853 2024-11-21T00:27:06,450 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33853 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:06,451 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:06,455 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:06,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338530x0, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:06,548 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:338530x0, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on znode that does not yet exist, /01148209107/running 2024-11-21T00:27:06,549 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:27:06,560 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33853-0x1015ac95b3a0001 connected 2024-11-21T00:27:06,591 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, 
evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:27:06,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on znode that does not yet exist, /01148209107/master 2024-11-21T00:27:06,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on znode that does not yet exist, /01148209107/acl 2024-11-21T00:27:06,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33853 2024-11-21T00:27:06,648 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33853 2024-11-21T00:27:06,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33853 2024-11-21T00:27:06,688 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33853 2024-11-21T00:27:06,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33853 2024-11-21T00:27:06,768 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:42819 2024-11-21T00:27:06,789 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /01148209107/backup-masters/5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:06,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107/backup-masters 2024-11-21T00:27:06,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107/backup-masters 2024-11-21T00:27:06,805 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on existing znode=/01148209107/backup-masters/5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:06,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01148209107/master 2024-11-21T00:27:06,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:06,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:06,815 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on existing znode=/01148209107/master 2024-11-21T00:27:06,816 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /01148209107/backup-masters/5ed4808ef0e6,42819,1732148825926 from backup master directory 2024-11-21T00:27:06,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01148209107/backup-masters/5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:06,826 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:27:06,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107/backup-masters 2024-11-21T00:27:06,826 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:06,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107/backup-masters 2024-11-21T00:27:06,881 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/hbase.id] with ID: 7aafd06b-565f-4e2c-a825-a8d6ed1445cb 2024-11-21T00:27:06,881 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/.tmp/hbase.id 2024-11-21T00:27:06,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:27:07,345 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/.tmp/hbase.id]:[hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/hbase.id] 2024-11-21T00:27:07,358 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:07,359 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:27:07,360 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
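The FSUtils entries above show the new cluster ID (7aafd06b-...) being written to a temporary file under .tmp and then moved into place as hbase.id. The sketch below illustrates that write-then-rename pattern with the plain Hadoop FileSystem API; the root directory and the bare-UUID payload are assumptions for illustration (the test's real rootdir is the hdfs://localhost:37293/... path shown above, and HBase serializes a ClusterId object rather than a plain string), and the class name is made up.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical root dir standing in for the test's hdfs://localhost:37293/... path.
    Path rootDir = new Path("hdfs://localhost:8020/hbase");
    FileSystem fs = rootDir.getFileSystem(conf);

    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // Write the id to a temporary file first (simplified: a bare UUID string here,
    // whereas HBase writes a serialized ClusterId)...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // ...then move it to its final name, so readers never observe a half-written file.
    if (!fs.rename(tmp, target)) {
      throw new IOException("Failed to rename " + tmp + " to " + target);
    }
  }
}

The temporary-file-plus-rename step is what makes the "Write ... to a temporary location" / "Move the temporary cluster ID file to its target location" pair in the log safe against readers racing with the writer.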
2024-11-21T00:27:07,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:07,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:07,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:27:07,530 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:07,531 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:27:07,532 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:07,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:27:07,546 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store 2024-11-21T00:27:07,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:27:08,010 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:08,010 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:27:08,010 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:08,010 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:08,010 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:27:08,010 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:08,011 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
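The descriptor dump above lists the column families of the master's local 'master:store' region (info, proc, rs, state) with their versions, block sizes, bloom filters and encodings. As a rough illustration of how those printed attributes map onto the public client API, the sketch below builds two comparable families with ColumnFamilyDescriptorBuilder / TableDescriptorBuilder; the table name is a placeholder, and this is not how MasterRegion constructs its descriptor internally.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyAttributesSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family printed above: 3 versions, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // Mirrors 'proc'/'rs'/'state': 1 version, ROW bloom filter, 64 KB blocks.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    // Placeholder table name; "master:store" itself is internal to the master.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}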
2024-11-21T00:27:08,011 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148828010Disabling compacts and flushes for region at 1732148828010Disabling writes for close at 1732148828010Writing region close event to WAL at 1732148828010Closed at 1732148828010 2024-11-21T00:27:08,012 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/.initializing 2024-11-21T00:27:08,012 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/WALs/5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:08,013 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:08,015 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C42819%2C1732148825926, suffix=, logDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/WALs/5ed4808ef0e6,42819,1732148825926, archiveDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/oldWALs, maxLogs=10 2024-11-21T00:27:08,037 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/WALs/5ed4808ef0e6,42819,1732148825926/5ed4808ef0e6%2C42819%2C1732148825926.1732148828015, exclude list is [], retry=0 2024-11-21T00:27:08,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33273,DS-7ff55e78-ba20-4558-ada4-8cb39555b95f,DISK] 2024-11-21T00:27:08,055 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/WALs/5ed4808ef0e6,42819,1732148825926/5ed4808ef0e6%2C42819%2C1732148825926.1732148828015 2024-11-21T00:27:08,060 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655)] 2024-11-21T00:27:08,060 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:08,061 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:08,061 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,061 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,072 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:27:08,080 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:08,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:08,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:27:08,082 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:08,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:08,082 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:27:08,100 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:08,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:08,102 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,104 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:27:08,104 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:08,112 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:08,113 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,114 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,114 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,115 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,115 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,116 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:08,118 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:08,127 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:08,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63422462, jitterRate=-0.05493167042732239}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:08,127 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148828063Initializing all the Stores at 1732148828064 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148828064Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148828066 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148828066Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148828066Cleaning up temporary data from old regions at 1732148828115 (+49 ms)Region opened successfully at 1732148828127 (+12 ms) 2024-11-21T00:27:08,136 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:27:08,145 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ab85f41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:08,150 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:27:08,150 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:27:08,150 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:27:08,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:27:08,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:27:08,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:27:08,152 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:27:08,156 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
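The FlushLargeStoresPolicy entry above derives its per-family lower bound from the region memstore flush size divided by the number of families: 128 MB split across the four families of master:store gives the 32 MB (flushSizeLowerBound=33554432) the open journal reports. The sketch below just reproduces that arithmetic and shows one way to pin the bound explicitly as a table attribute, using the key copied from that log line; the table name is a placeholder and the class name is made up.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // 128 MB region memstore flush size across 4 column families -> 32 MB per family,
    // matching flushSizeLowerBound=33554432 in the region open entry above.
    long memstoreFlushSize = 128L * 1024 * 1024;
    int families = 4;
    System.out.println(memstoreFlushSize / families); // 33554432

    // The log message says the bound is read from the table descriptor when set;
    // here it is pinned explicitly instead of being derived.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(32L * 1024 * 1024))
        .build();
    System.out.println(td.getValues().size()); // descriptor now carries the attribute
  }
}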
2024-11-21T00:27:08,157 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Unable to get data of znode /01148209107/balancer because node does not exist (not necessarily an error) 2024-11-21T00:27:08,208 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01148209107/balancer already deleted, retry=false 2024-11-21T00:27:08,209 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:27:08,209 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Unable to get data of znode /01148209107/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:27:08,221 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01148209107/normalizer already deleted, retry=false 2024-11-21T00:27:08,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:27:08,240 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Unable to get data of znode /01148209107/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:27:08,253 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01148209107/switch/split already deleted, retry=false 2024-11-21T00:27:08,254 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Unable to get data of znode /01148209107/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:27:08,263 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01148209107/switch/merge already deleted, retry=false 2024-11-21T00:27:08,273 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Unable to get data of znode /01148209107/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:27:08,288 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01148209107/snapshot-cleanup already deleted, retry=false 2024-11-21T00:27:08,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01148209107/running 2024-11-21T00:27:08,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01148209107/running 2024-11-21T00:27:08,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:08,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:08,299 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,42819,1732148825926, sessionid=0x1015ac95b3a0000, setting cluster-up flag (Was=false) 2024-11-21T00:27:08,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:08,351 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01148209107/flush-table-proc/acquired, /01148209107/flush-table-proc/reached, /01148209107/flush-table-proc/abort 2024-11-21T00:27:08,353 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:08,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:08,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:08,404 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01148209107/online-snapshot/acquired, /01148209107/online-snapshot/reached, /01148209107/online-snapshot/abort 2024-11-21T00:27:08,405 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:08,416 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:27:08,419 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:08,419 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:27:08,419 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
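The StochasticLoadBalancer entry above echoes the knobs it loaded (maxSteps=1000000, stepsPerRegion=800, maxRunningTime=30000, isByTable=false) and BaseLoadBalancer reports slop=0.2. The sketch below sets the corresponding site-configuration keys as they are commonly documented; treat the exact key names as an assumption, since the log prints only the parsed values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Values mirror what the master reports loading above.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setInt("hbase.master.balancer.stochastic.maxRunningTime", 30_000);
    conf.setFloat("hbase.regions.slop", 0.2f); // "slop=0.2" from BaseLoadBalancer
    return conf;
  }
}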
2024-11-21T00:27:08,420 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,42819,1732148825926 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:08,424 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,490 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:08,490 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:27:08,491 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:08,491 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:27:08,508 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(746): ClusterId : 7aafd06b-565f-4e2c-a825-a8d6ed1445cb 2024-11-21T00:27:08,508 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:27:08,521 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:27:08,521 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:27:08,527 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148858527 2024-11-21T00:27:08,528 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:27:08,528 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:27:08,528 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:27:08,528 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:27:08,528 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:27:08,528 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:27:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
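Everything in this run hangs off the ZooKeeper ensemble at 127.0.0.1:50082 with the non-default base znode /01148209107, and the region server above confirms the ClusterId published earlier. A minimal, hypothetical client sketch follows, showing the two settings an external client of this test cluster would need; it only reads back the cluster id.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TestClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values taken from the log: ensemble 127.0.0.1:50082, base znode /01148209107.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 50082);
    conf.set("zookeeper.znode.parent", "/01148209107");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Should print the id written to hbase.id above (7aafd06b-...).
      System.out.println(admin.getClusterMetrics().getClusterId());
    }
  }
}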
2024-11-21T00:27:08,530 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:27:08,530 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:27:08,530 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:27:08,530 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:27:08,531 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:27:08,531 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:27:08,531 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148828531,5,FailOnTimeoutGroup] 2024-11-21T00:27:08,531 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:27:08,531 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148828531,5,FailOnTimeoutGroup] 2024-11-21T00:27:08,531 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,531 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:27:08,531 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,531 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
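The cleaner chore initialization above lists the delegate classes the master loaded for the log cleaner and the hfile cleaner. The sketch below shows how such delegates are normally declared through the plugin properties; the class names are the ones the log itself reports, while the two property names are the usual documented ones and are an assumption here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CleanerPluginsSketch {
  public static Configuration withCleaners() {
    Configuration conf = HBaseConfiguration.create();
    // A subset of the delegates the master reports initializing above, comma separated.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
            + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner,"
            + "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner");
    return conf;
  }
}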
2024-11-21T00:27:08,532 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@85b7531, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:08,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:27:08,551 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:33853 2024-11-21T00:27:08,552 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:27:08,552 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:27:08,552 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:27:08,552 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,42819,1732148825926 with port=33853, startcode=1732148826420 2024-11-21T00:27:08,553 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:27:08,578 INFO [HMaster-EventLoopGroup-31-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41281, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.14 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:27:08,578 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42819 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:08,579 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42819 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:08,580 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b 2024-11-21T00:27:08,581 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37293 2024-11-21T00:27:08,581 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:27:08,593 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107/rs 2024-11-21T00:27:08,594 DEBUG [RS:0;5ed4808ef0e6:33853 {}] zookeeper.ZKUtil(111): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on existing znode=/01148209107/rs/5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:08,594 WARN [RS:0;5ed4808ef0e6:33853 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:27:08,594 INFO [RS:0;5ed4808ef0e6:33853 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:08,594 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:08,614 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,33853,1732148826420] 2024-11-21T00:27:08,631 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:27:08,656 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:27:08,674 INFO [RS:0;5ed4808ef0e6:33853 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:27:08,674 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,675 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:27:08,676 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:27:08,676 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
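Here the region server instantiates AsyncFSWALProvider; its actual WAL, created a little further below, uses a deliberately tiny blocksize=20 KB / rollsize=10 KB, versus the 256 MB / 128 MB seen for the master WAL earlier. A hedged configuration sketch follows; the provider shorthand and the size keys are the commonly documented ones and should be treated as assumptions, while the 0.5 roll multiplier matches the blocksize-to-rollsize ratio seen in both WALs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static Configuration asyncWal() {
    Configuration conf = HBaseConfiguration.create();
    // "asyncfs" selects AsyncFSWALProvider, the provider the log shows being instantiated.
    conf.set("hbase.wal.provider", "asyncfs");
    // Roughly reproduces the test's tiny 20 KB / 10 KB pair; production defaults are
    // far larger (the 256 MB / 128 MB of the master WAL above).
    conf.setLong("hbase.regionserver.hlog.blocksize", 20 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    return conf;
  }
}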
2024-11-21T00:27:08,676 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,676 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:08,677 DEBUG [RS:0;5ed4808ef0e6:33853 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:08,708 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,708 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,708 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,708 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
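The block of ChoreService(168) lines here and just below shows how much of a region server is periodic chores: CompactionChecker and MemstoreFlusherChore every second, ExecutorStatusChore every minute, nonceCleaner every six minutes, and so on. For orientation, the sketch below schedules a trivial chore on the public ChoreService/ScheduledChore API, assuming the three-argument ScheduledChore constructor (name, stopper, period in milliseconds); the names are made up.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Fires every second, like the CompactionChecker / MemstoreFlusherChore entries above.
    ScheduledChore tick = new ScheduledChore("exampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService service = new ChoreService("example");
    service.scheduleChore(tick); // ChoreService reports the chore as enabled, as in the log
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}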
2024-11-21T00:27:08,708 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,708 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33853,1732148826420-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:08,740 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:27:08,741 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33853,1732148826420-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,741 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,741 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.Replication(171): 5ed4808ef0e6,33853,1732148826420 started 2024-11-21T00:27:08,762 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:08,762 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,33853,1732148826420, RpcServer on 5ed4808ef0e6/172.17.0.2:33853, sessionid=0x1015ac95b3a0001 2024-11-21T00:27:08,762 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:27:08,763 DEBUG [RS:0;5ed4808ef0e6:33853 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:08,763 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33853,1732148826420' 2024-11-21T00:27:08,763 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01148209107/flush-table-proc/abort' 2024-11-21T00:27:08,763 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01148209107/flush-table-proc/acquired' 2024-11-21T00:27:08,764 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:27:08,764 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:27:08,764 DEBUG [RS:0;5ed4808ef0e6:33853 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:08,764 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33853,1732148826420' 2024-11-21T00:27:08,764 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01148209107/online-snapshot/abort' 2024-11-21T00:27:08,764 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01148209107/online-snapshot/acquired' 2024-11-21T00:27:08,765 DEBUG [RS:0;5ed4808ef0e6:33853 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:27:08,765 INFO [RS:0;5ed4808ef0e6:33853 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:27:08,765 INFO [RS:0;5ed4808ef0e6:33853 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:27:08,865 INFO [RS:0;5ed4808ef0e6:33853 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:08,869 INFO [RS:0;5ed4808ef0e6:33853 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33853%2C1732148826420, suffix=, logDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420, archiveDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/oldWALs, maxLogs=10 2024-11-21T00:27:08,890 DEBUG [RS:0;5ed4808ef0e6:33853 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, exclude list is [], retry=0 2024-11-21T00:27:08,894 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33273,DS-7ff55e78-ba20-4558-ada4-8cb39555b95f,DISK] 2024-11-21T00:27:08,906 INFO [RS:0;5ed4808ef0e6:33853 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 2024-11-21T00:27:08,907 DEBUG [RS:0;5ed4808ef0e6:33853 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655)] 2024-11-21T00:27:08,960 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:27:08,960 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b 2024-11-21T00:27:09,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:27:09,016 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:09,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:09,032 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:09,032 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,033 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:09,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:09,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:09,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:09,038 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:09,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:09,039 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,040 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:09,041 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740 2024-11-21T00:27:09,041 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740 2024-11-21T00:27:09,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:09,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:27:09,042 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:09,044 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:09,061 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:09,061 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74678454, jitterRate=0.11279568076133728}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:09,061 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148829016Initializing all the Stores at 1732148829016Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148829016Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148829026 (+10 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148829026Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148829026Cleaning up temporary data from old regions at 1732148829042 (+16 ms)Region opened successfully at 1732148829061 (+19 ms) 2024-11-21T00:27:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:09,062 INFO [PEWorker-1 {}] regionserver.HRegion(1755): 
Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:27:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:09,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:09,076 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:09,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148829062Disabling compacts and flushes for region at 1732148829062Disabling writes for close at 1732148829062Writing region close event to WAL at 1732148829076 (+14 ms)Closed at 1732148829076 2024-11-21T00:27:09,078 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:09,078 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:27:09,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:27:09,079 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:09,080 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:27:09,230 DEBUG [5ed4808ef0e6:42819 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:27:09,231 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:09,232 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33853,1732148826420, state=OPENING 2024-11-21T00:27:09,374 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:27:09,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:09,401 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01148209107/meta-region-server: CHANGED 2024-11-21T00:27:09,401 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:09,402 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, 
state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33853,1732148826420}] 2024-11-21T00:27:09,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:09,404 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01148209107/meta-region-server: CHANGED 2024-11-21T00:27:09,562 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:09,565 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50791, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:27:09,585 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:27:09,585 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:09,585 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:27:09,587 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33853%2C1732148826420.meta, suffix=.meta, logDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420, archiveDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/oldWALs, maxLogs=10 2024-11-21T00:27:09,604 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.meta.1732148829587.meta, exclude list is [], retry=0 2024-11-21T00:27:09,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33273,DS-7ff55e78-ba20-4558-ada4-8cb39555b95f,DISK] 2024-11-21T00:27:09,613 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.meta.1732148829587.meta 2024-11-21T00:27:09,613 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655)] 2024-11-21T00:27:09,613 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:09,614 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:09,614 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:09,614 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:27:09,614 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:27:09,614 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:27:09,614 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:09,614 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:27:09,614 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:27:09,626 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:09,629 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:09,629 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:09,633 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:09,633 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:09,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:09,634 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,635 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:09,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:09,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:09,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:09,636 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:09,639 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740 2024-11-21T00:27:09,644 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740 2024-11-21T00:27:09,649 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:09,649 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:27:09,651 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
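[Annotation] The CompactionConfiguration lines above dump the effective per-store compaction settings for each column family of hbase:meta (min/max files to compact, ratios, throttle point, major-compaction period and jitter). As a rough sketch of where those numbers come from, the snippet below sets what I believe are the corresponding hbase-site.xml properties programmatically; the key names are the standard ones as far as I know and the class name is invented for illustration, so treat this as a sketch rather than a verified recipe.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    // Values mirror the CompactionConfiguration printout above; key names are
    // the usual hbase-site.xml properties (verify against your HBase version).
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // minor compaction ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);      // 128 MB
    conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);   // 604800000 ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println("ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
  }
}

If the keys match your version, the values set this way are what show up in the same printout the next time a store opens.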
2024-11-21T00:27:09,661 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:09,665 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60457499, jitterRate=-0.09911306202411652}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:09,665 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:27:09,665 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148829614Writing region info on filesystem at 1732148829614Initializing all the Stores at 1732148829616 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148829617 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148829626 (+9 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148829626Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148829626Cleaning up temporary data from old regions at 1732148829649 (+23 ms)Running coprocessor post-open hooks at 1732148829665 (+16 ms)Region opened successfully at 1732148829665 2024-11-21T00:27:09,671 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148829562 2024-11-21T00:27:09,678 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:27:09,678 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:27:09,682 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:09,684 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33853,1732148826420, state=OPEN 2024-11-21T00:27:09,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01148209107/meta-region-server 2024-11-21T00:27:09,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01148209107/meta-region-server 2024-11-21T00:27:09,769 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:09,769 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01148209107/meta-region-server: CHANGED 2024-11-21T00:27:09,769 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01148209107/meta-region-server: CHANGED 2024-11-21T00:27:09,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:27:09,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33853,1732148826420 in 367 msec 2024-11-21T00:27:09,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:27:09,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 693 msec 2024-11-21T00:27:09,774 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:09,774 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:27:09,777 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:09,777 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33853,1732148826420, seqNum=-1] 2024-11-21T00:27:09,777 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:09,779 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33571, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:09,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3670 sec 2024-11-21T00:27:09,785 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148829785, completionTime=-1 
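[Annotation] At this point the meta region is OPEN, its location has been published under the meta-region-server znode, and InitMetaProcedure has created the 'default' and 'hbase' namespaces. A minimal client-side sketch (hypothetical class name, standard public client API) that would observe both of those facts from outside the cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Where hbase:meta,,1.1588230740 is currently served, i.e. the location
      // the master just published during region assignment.
      HRegionLocation loc = meta.getRegionLocation(new byte[0]);
      System.out.println("meta is on " + loc.getServerName());
      // The 'default' and 'hbase' namespaces created at the end of InitMetaProcedure.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
    }
  }
}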
2024-11-21T00:27:09,785 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:27:09,785 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:27:09,787 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:27:09,787 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148889787 2024-11-21T00:27:09,787 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148949787 2024-11-21T00:27:09,787 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:27:09,788 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148825926-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:09,788 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148825926-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:09,788 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148825926-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:09,788 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:42819, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:09,788 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:09,790 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:27:09,792 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.970sec 2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
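[Annotation] The repeated "Chore ScheduledChore name=..., period=..., unit=... is enabled" lines come from ChoreService scheduling the master's periodic tasks (balancer, catalog janitor, normalizer, and so on). ChoreService and ScheduledChore are internal HBase classes, so the sketch below is only meant to illustrate the pattern; the constructor signature is from memory and may differ between versions, and the class name is invented.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Trivial Stoppable so the chore has an owner whose state it can check.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Same shape as the chores logged above: a named task with a fixed period,
    // registered with a ChoreService that logs "... is enabled." on scheduling.
    ScheduledChore heartbeat =
        new ScheduledChore("demo-chore", stopper, 1000, 0, TimeUnit.MILLISECONDS) {
      @Override protected void chore() {
        System.out.println("chore fired at " + System.currentTimeMillis());
      }
    };
    ChoreService service = new ChoreService("demo");
    service.scheduleChore(heartbeat);
    Thread.sleep(3500);
    service.shutdown();
  }
}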
2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148825926-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:09,796 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148825926-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:27:09,805 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:27:09,805 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:27:09,805 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42819,1732148825926-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:09,820 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@248e81fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:09,821 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42819,-1 for getting cluster id 2024-11-21T00:27:09,821 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:09,828 DEBUG [HMaster-EventLoopGroup-31-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7aafd06b-565f-4e2c-a825-a8d6ed1445cb' 2024-11-21T00:27:09,829 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:09,829 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7aafd06b-565f-4e2c-a825-a8d6ed1445cb" 2024-11-21T00:27:09,829 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20a299a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:09,829 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42819,-1] 2024-11-21T00:27:09,830 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:09,830 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:09,831 INFO [HMaster-EventLoopGroup-31-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:09,832 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5122c2b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:09,832 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:09,833 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33853,1732148826420, seqNum=-1] 2024-11-21T00:27:09,834 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:09,835 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57360, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:09,836 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:09,837 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:09,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:09,909 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015ac95b3a0002 connected 2024-11-21T00:27:09,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.log.dir so I do NOT create it in target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc 2024-11-21T00:27:09,952 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:27:09,952 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.tmp.dir so I do NOT create it in target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc 2024-11-21T00:27:09,952 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.tmp.dir Erasing configuration value by system value. 
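[Annotation] With the first minicluster reported up ("Minicluster is up; activeMaster=..."), the test utility immediately starts provisioning a second one. The log itself names HBaseTestingUtil and StartMiniClusterOption; the sketch below shows how such a cluster is typically started and torn down in a test, with the builder methods assumed from the option string printed at startup (numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1) and a hypothetical class name.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Test-only utility: spins up ZooKeeper, a mini-DFS and HBase in-process,
    // producing startup output of the same shape as the log above.
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // Comparable to the clusterId the ClusterIdFetcher retrieved above.
      System.out.println("cluster id: "
          + util.getAdmin().getClusterMetrics().getClusterId());
    } finally {
      util.shutdownMiniCluster();   // tears the whole stack down again
    }
  }
}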
2024-11-21T00:27:09,952 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/cluster_e7ac7e7e-01db-540e-7a4f-79aaa6d6cbb7, deleteOnExit=true 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/test.cache.data in system properties and HBase conf 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:27:09,953 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:27:09,954 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:27:09,954 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:09,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:09,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:27:09,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:27:09,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:27:09,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:09,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:27:09,955 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:27:10,775 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:10,788 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:27:10,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:27:10,824 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:27:10,824 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:27:10,829 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:10,829 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44bf1bb8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:27:10,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a51b703{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:27:10,965 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@391811d5{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/java.io.tmpdir/jetty-localhost-33313-hadoop-hdfs-3_4_1-tests_jar-_-any-15070316706052799097/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:27:10,969 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b7fb873{HTTP/1.1, (http/1.1)}{localhost:33313} 2024-11-21T00:27:10,969 INFO [Time-limited test {}] server.Server(415): Started @565518ms 2024-11-21T00:27:11,535 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:11,540 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:27:11,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:27:11,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:27:11,545 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:27:11,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68e9eb4a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:27:11,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4074e033{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:27:11,725 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1028ab63{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/java.io.tmpdir/jetty-localhost-38391-hadoop-hdfs-3_4_1-tests_jar-_-any-16858801475482426519/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:27:11,726 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6bfba3eb{HTTP/1.1, (http/1.1)}{localhost:38391} 2024-11-21T00:27:11,726 INFO [Time-limited test {}] server.Server(415): Started @566275ms 2024-11-21T00:27:11,727 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:27:12,600 WARN [Thread-2826 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/cluster_e7ac7e7e-01db-540e-7a4f-79aaa6d6cbb7/data/data1/current/BP-2008586648-172.17.0.2-1732148830009/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:12,601 WARN [Thread-2827 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/cluster_e7ac7e7e-01db-540e-7a4f-79aaa6d6cbb7/data/data2/current/BP-2008586648-172.17.0.2-1732148830009/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:12,612 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:12,761 WARN [Thread-2814 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T00:27:12,766 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa1650d27c4c6b7a7 with lease ID 0xec674ac18defec40: Processing first storage report for DS-d99e9ffc-aee0-4df8-97a6-2e07959051f5 from datanode DatanodeRegistration(127.0.0.1:37973, datanodeUuid=45c5d1b8-a541-4b1e-9df1-9bb0a8fd5235, infoPort=33105, infoSecurePort=0, ipcPort=33219, storageInfo=lv=-57;cid=testClusterID;nsid=2033568189;c=1732148830009) 2024-11-21T00:27:12,766 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1650d27c4c6b7a7 with lease ID 0xec674ac18defec40: from storage DS-d99e9ffc-aee0-4df8-97a6-2e07959051f5 node DatanodeRegistration(127.0.0.1:37973, datanodeUuid=45c5d1b8-a541-4b1e-9df1-9bb0a8fd5235, infoPort=33105, infoSecurePort=0, ipcPort=33219, storageInfo=lv=-57;cid=testClusterID;nsid=2033568189;c=1732148830009), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:27:12,766 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa1650d27c4c6b7a7 with lease ID 0xec674ac18defec40: Processing first storage report for DS-e24d9203-60ed-48d7-ba11-abe73f21f555 from datanode DatanodeRegistration(127.0.0.1:37973, datanodeUuid=45c5d1b8-a541-4b1e-9df1-9bb0a8fd5235, infoPort=33105, infoSecurePort=0, ipcPort=33219, storageInfo=lv=-57;cid=testClusterID;nsid=2033568189;c=1732148830009) 2024-11-21T00:27:12,767 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa1650d27c4c6b7a7 with lease ID 0xec674ac18defec40: from storage DS-e24d9203-60ed-48d7-ba11-abe73f21f555 node DatanodeRegistration(127.0.0.1:37973, datanodeUuid=45c5d1b8-a541-4b1e-9df1-9bb0a8fd5235, infoPort=33105, infoSecurePort=0, ipcPort=33219, storageInfo=lv=-57;cid=testClusterID;nsid=2033568189;c=1732148830009), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:27:12,793 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc 2024-11-21T00:27:12,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:12,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:27:12,852 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea with version=8 2024-11-21T00:27:12,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/hbase-staging 2024-11-21T00:27:12,855 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:12,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:12,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:12,856 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:12,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:12,856 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:12,856 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:27:12,856 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:12,864 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41951 2024-11-21T00:27:12,866 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41951 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:12,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419510x0, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:12,901 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41951-0x1015ac95b3a0003 connected 2024-11-21T00:27:12,983 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:12,985 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:12,993 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on znode that does not yet exist, /1-2137408572/running 2024-11-21T00:27:12,993 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea, hbase.cluster.distributed=false 2024-11-21T00:27:12,995 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on znode that does not yet exist, /1-2137408572/acl 2024-11-21T00:27:13,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41951 2024-11-21T00:27:13,020 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41951 2024-11-21T00:27:13,048 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41951 2024-11-21T00:27:13,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41951 2024-11-21T00:27:13,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41951 2024-11-21T00:27:13,097 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:13,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:13,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:13,097 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:13,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:13,097 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:13,098 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:27:13,098 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:13,109 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44369 2024-11-21T00:27:13,110 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44369 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:13,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:13,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:13,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:443690x0, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:13,163 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:443690x0, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on znode that does not yet exist, /1-2137408572/running 2024-11-21T00:27:13,163 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:27:13,184 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44369-0x1015ac95b3a0004 connected 2024-11-21T00:27:13,208 DEBUG [Time-limited test {}] mob.MobFileCache(123): 
MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:27:13,209 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on znode that does not yet exist, /1-2137408572/master 2024-11-21T00:27:13,210 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on znode that does not yet exist, /1-2137408572/acl 2024-11-21T00:27:13,251 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44369 2024-11-21T00:27:13,266 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44369 2024-11-21T00:27:13,277 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44369 2024-11-21T00:27:13,306 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44369 2024-11-21T00:27:13,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44369 2024-11-21T00:27:13,336 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:41951 2024-11-21T00:27:13,337 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-2137408572/backup-masters/5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:13,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572/backup-masters 2024-11-21T00:27:13,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572/backup-masters 2024-11-21T00:27:13,411 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on existing znode=/1-2137408572/backup-masters/5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:13,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:13,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-2137408572/master 2024-11-21T00:27:13,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:13,421 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on existing 
znode=/1-2137408572/master 2024-11-21T00:27:13,422 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-2137408572/backup-masters/5ed4808ef0e6,41951,1732148832855 from backup master directory 2024-11-21T00:27:13,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-2137408572/backup-masters/5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:13,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572/backup-masters 2024-11-21T00:27:13,435 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:27:13,435 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:13,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572/backup-masters 2024-11-21T00:27:13,454 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/hbase.id] with ID: fa60625d-922e-41d1-b9e8-6909057e9209 2024-11-21T00:27:13,454 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/.tmp/hbase.id 2024-11-21T00:27:13,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:27:13,499 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/.tmp/hbase.id]:[hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/hbase.id] 2024-11-21T00:27:13,514 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:13,514 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:27:13,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-21T00:27:13,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:13,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:13,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:27:13,984 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:13,985 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:27:13,992 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:14,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:27:14,021 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store 2024-11-21T00:27:14,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:27:14,070 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:14,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:27:14,071 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:14,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:14,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:27:14,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:14,071 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:27:14,071 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148834070Disabling compacts and flushes for region at 1732148834070Disabling writes for close at 1732148834071 (+1 ms)Writing region close event to WAL at 1732148834071Closed at 1732148834071 2024-11-21T00:27:14,075 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/.initializing 2024-11-21T00:27:14,076 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/WALs/5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:14,077 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:14,084 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C41951%2C1732148832855, suffix=, logDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/WALs/5ed4808ef0e6,41951,1732148832855, archiveDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/oldWALs, maxLogs=10 2024-11-21T00:27:14,102 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/WALs/5ed4808ef0e6,41951,1732148832855/5ed4808ef0e6%2C41951%2C1732148832855.1732148834084, exclude list is [], retry=0 2024-11-21T00:27:14,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37973,DS-d99e9ffc-aee0-4df8-97a6-2e07959051f5,DISK] 2024-11-21T00:27:14,152 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/WALs/5ed4808ef0e6,41951,1732148832855/5ed4808ef0e6%2C41951%2C1732148832855.1732148834084 2024-11-21T00:27:14,164 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33105:33105)] 2024-11-21T00:27:14,164 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:14,165 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:14,165 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,165 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,172 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,176 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:27:14,176 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:14,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:14,177 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:27:14,178 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:14,178 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:14,179 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:27:14,180 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:14,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:14,180 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,181 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:27:14,181 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:14,182 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:14,182 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,183 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,183 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,184 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,184 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,184 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:14,191 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:14,201 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:14,202 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62027391, jitterRate=-0.07571984827518463}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:14,202 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148834165Initializing all the Stores at 1732148834166 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148834166Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148834171 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148834171Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148834171Cleaning up temporary data from old regions at 1732148834184 (+13 ms)Region opened successfully at 1732148834202 (+18 ms) 2024-11-21T00:27:14,209 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:27:14,221 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:27:14,221 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@766e3ee9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:14,224 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:27:14,224 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:27:14,224 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:27:14,224 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:27:14,229 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 4 msec 2024-11-21T00:27:14,230 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:27:14,230 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:27:14,249 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:27:14,250 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Unable to get data of znode /1-2137408572/balancer because node does not exist (not necessarily an error) 2024-11-21T00:27:14,360 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-2137408572/balancer already deleted, retry=false 2024-11-21T00:27:14,361 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:27:14,362 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Unable to get data of znode /1-2137408572/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:27:14,478 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-2137408572/normalizer already deleted, retry=false 2024-11-21T00:27:14,480 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:27:14,484 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Unable to get data of znode /1-2137408572/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:27:14,498 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-2137408572/switch/split already deleted, retry=false 2024-11-21T00:27:14,499 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Unable to get data of znode /1-2137408572/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:27:14,509 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-2137408572/switch/merge already deleted, retry=false 2024-11-21T00:27:14,517 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Unable to get data of znode /1-2137408572/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:27:14,526 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-2137408572/snapshot-cleanup already deleted, retry=false 2024-11-21T00:27:14,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-2137408572/running 2024-11-21T00:27:14,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:14,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-2137408572/running 2024-11-21T00:27:14,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:14,538 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,41951,1732148832855, sessionid=0x1015ac95b3a0003, setting cluster-up flag (Was=false) 2024-11-21T00:27:14,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:14,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:14,593 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-2137408572/flush-table-proc/acquired, /1-2137408572/flush-table-proc/reached, /1-2137408572/flush-table-proc/abort 2024-11-21T00:27:14,594 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:14,615 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:14,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:14,653 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-2137408572/online-snapshot/acquired, /1-2137408572/online-snapshot/reached, /1-2137408572/online-snapshot/abort 2024-11-21T00:27:14,654 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:14,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,664 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,665 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,670 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:27:14,676 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,682 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:14,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:27:14,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:27:14,683 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,41951,1732148832855 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:27:14,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:14,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:14,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:14,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:14,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:27:14,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,684 DEBUG 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:14,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,685 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:14,719 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:14,719 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:27:14,721 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:14,721 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:27:14,754 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(746): ClusterId : fa60625d-922e-41d1-b9e8-6909057e9209 2024-11-21T00:27:14,754 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:27:14,769 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:27:14,769 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:27:14,783 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:27:14,784 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a17d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:14,785 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148864785 2024-11-21T00:27:14,785 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:27:14,785 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:27:14,786 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:27:14,786 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:27:14,786 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:27:14,786 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:27:14,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:27:14,832 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:14,835 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:44369 2024-11-21T00:27:14,835 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:27:14,835 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:27:14,835 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:27:14,836 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,41951,1732148832855 with port=44369, startcode=1732148833096 2024-11-21T00:27:14,836 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:27:14,848 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:27:14,849 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:27:14,849 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:27:14,849 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:27:14,857 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:27:14,857 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:27:14,868 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148834857,5,FailOnTimeoutGroup] 2024-11-21T00:27:14,876 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148834868,5,FailOnTimeoutGroup] 2024-11-21T00:27:14,877 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:14,877 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:27:14,877 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:14,877 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:14,880 INFO [HMaster-EventLoopGroup-33-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45825, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.15 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:27:14,880 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41951 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:14,881 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41951 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:14,883 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea 2024-11-21T00:27:14,883 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46065 2024-11-21T00:27:14,883 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:27:14,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572/rs 2024-11-21T00:27:14,925 DEBUG [RS:0;5ed4808ef0e6:44369 {}] zookeeper.ZKUtil(111): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on existing znode=/1-2137408572/rs/5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:14,925 WARN [RS:0;5ed4808ef0e6:44369 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:27:14,925 INFO [RS:0;5ed4808ef0e6:44369 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:14,925 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:14,948 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,44369,1732148833096] 2024-11-21T00:27:14,953 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:27:14,954 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:27:14,969 INFO [RS:0;5ed4808ef0e6:44369 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:27:14,969 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:14,984 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:27:14,985 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:27:14,985 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:14,985 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,985 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,985 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,985 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,985 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,985 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:14,986 DEBUG [RS:0;5ed4808ef0e6:44369 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:15,007 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:15,007 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,007 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,007 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,007 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,007 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44369,1732148833096-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:15,039 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:27:15,040 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,44369,1732148833096-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,040 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,040 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.Replication(171): 5ed4808ef0e6,44369,1732148833096 started 2024-11-21T00:27:15,060 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,060 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,44369,1732148833096, RpcServer on 5ed4808ef0e6/172.17.0.2:44369, sessionid=0x1015ac95b3a0004 2024-11-21T00:27:15,060 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:27:15,060 DEBUG [RS:0;5ed4808ef0e6:44369 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:15,060 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,44369,1732148833096' 2024-11-21T00:27:15,060 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-2137408572/flush-table-proc/abort' 2024-11-21T00:27:15,061 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-2137408572/flush-table-proc/acquired' 2024-11-21T00:27:15,061 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:27:15,061 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:27:15,061 DEBUG [RS:0;5ed4808ef0e6:44369 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:15,061 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,44369,1732148833096' 2024-11-21T00:27:15,061 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-2137408572/online-snapshot/abort' 
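Most of the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries above come out of the same ChoreService/ScheduledChore pattern. A small sketch of that pattern, assuming the ScheduledChore(name, stopper, period) and ChoreService(prefix) constructor shapes of the 3.0 line:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore has a lifecycle owner.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Fires once per second, mirroring the period=1000 chores in the log.
        ScheduledChore heartbeat = new ScheduledChore("demo-heartbeat", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(heartbeat);
        Thread.sleep(3000);
        stopper.stop("done");
        service.shutdown();
      }
    }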
2024-11-21T00:27:15,061 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-2137408572/online-snapshot/acquired' 2024-11-21T00:27:15,062 DEBUG [RS:0;5ed4808ef0e6:44369 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:27:15,062 INFO [RS:0;5ed4808ef0e6:44369 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:27:15,062 INFO [RS:0;5ed4808ef0e6:44369 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:27:15,162 INFO [RS:0;5ed4808ef0e6:44369 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:15,165 INFO [RS:0;5ed4808ef0e6:44369 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C44369%2C1732148833096, suffix=, logDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096, archiveDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/oldWALs, maxLogs=10 2024-11-21T00:27:15,181 DEBUG [RS:0;5ed4808ef0e6:44369 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, exclude list is [], retry=0 2024-11-21T00:27:15,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37973,DS-d99e9ffc-aee0-4df8-97a6-2e07959051f5,DISK] 2024-11-21T00:27:15,201 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:27:15,214 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:27:15,214 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea 2024-11-21T00:27:15,216 INFO [RS:0;5ed4808ef0e6:44369 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 2024-11-21T00:27:15,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,232 DEBUG [RS:0;5ed4808ef0e6:44369 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33105:33105)] 2024-11-21T00:27:15,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:27:15,272 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:15,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,278 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,279 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,284 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:15,286 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:15,286 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,286 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,287 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:15,288 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:15,288 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,289 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:15,289 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,290 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:15,290 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,291 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:15,292 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:15,292 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,294 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,294 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:15,294 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740 2024-11-21T00:27:15,295 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740 2024-11-21T00:27:15,296 DEBUG 
[PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:15,296 DEBUG [PEWorker-2 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:27:15,297 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:15,299 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:15,304 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:15,308 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:15,312 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72345502, jitterRate=0.078031986951828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:15,312 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148835272Initializing all the Stores at 1732148835272Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148835273 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148835284 (+11 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148835284Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148835284Cleaning up temporary data from old regions at 1732148835296 (+12 ms)Region opened successfully at 1732148835312 (+16 ms) 2024-11-21T00:27:15,312 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:15,312 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:15,312 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on 
hbase:meta,,1.1588230740 2024-11-21T00:27:15,312 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:15,312 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:15,320 INFO [PEWorker-2 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:15,321 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148835312Disabling compacts and flushes for region at 1732148835312Disabling writes for close at 1732148835312Writing region close event to WAL at 1732148835320 (+8 ms)Closed at 1732148835320 2024-11-21T00:27:15,322 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:15,322 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:27:15,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:27:15,323 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:15,326 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:27:15,327 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:27:15,477 DEBUG [5ed4808ef0e6:41951 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:27:15,478 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:15,479 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,44369,1732148833096, state=OPENING 2024-11-21T00:27:15,488 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:27:15,500 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:15,522 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:15,522 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:15,522 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,44369,1732148833096}] 2024-11-21T00:27:15,522 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-2137408572/meta-region-server: CHANGED 2024-11-21T00:27:15,523 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-2137408572/meta-region-server: CHANGED 2024-11-21T00:27:15,684 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:15,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-34-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49481, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:27:15,721 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:27:15,721 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:15,721 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:27:15,736 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C44369%2C1732148833096.meta, suffix=.meta, logDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096, archiveDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/oldWALs, maxLogs=10 2024-11-21T00:27:15,760 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.meta.1732148835737.meta, exclude list is [], retry=0 2024-11-21T00:27:15,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37973,DS-d99e9ffc-aee0-4df8-97a6-2e07959051f5,DISK] 2024-11-21T00:27:15,804 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.meta.1732148835737.meta 2024-11-21T00:27:15,816 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33105:33105)] 2024-11-21T00:27:15,817 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:15,817 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:15,817 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:15,817 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:27:15,817 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:27:15,817 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:27:15,818 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:15,818 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:27:15,818 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:27:15,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:15,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:15,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:15,836 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:15,836 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,836 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:15,837 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:15,837 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,838 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:15,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 
0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:15,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:15,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:15,839 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:15,840 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740 2024-11-21T00:27:15,842 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740 2024-11-21T00:27:15,843 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:15,844 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:27:15,844 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
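FlushLargeStoresPolicy falls back to memstore-flush-size divided by the number of families because the table descriptor above does not set hbase.hregion.percolumnfamilyflush.size.lower.bound. A hedged sketch of setting that bound on a user table (the table name, family and 16 MB value are made up for illustration; the key name is taken verbatim from the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBoundSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16 * 1024 * 1024))
            .build();
        System.out.println(td);
        // Passing td to Admin.createTable(td) would create the table with the bound set.
      }
    }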
2024-11-21T00:27:15,848 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:15,849 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68568310, jitterRate=0.021747440099716187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:15,849 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:27:15,849 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148835820Writing region info on filesystem at 1732148835820Initializing all the Stores at 1732148835822 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148835822Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148835833 (+11 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148835833Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148835833Cleaning up temporary data from old regions at 1732148835844 (+11 ms)Running coprocessor post-open hooks at 1732148835849 (+5 ms)Region opened successfully at 1732148835849 2024-11-21T00:27:15,850 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148835684 2024-11-21T00:27:15,853 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:15,854 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,44369,1732148833096, state=OPEN 2024-11-21T00:27:15,854 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): 
Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:27:15,854 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:27:15,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-2137408572/meta-region-server 2024-11-21T00:27:15,863 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-2137408572/meta-region-server: CHANGED 2024-11-21T00:27:15,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-2137408572/meta-region-server 2024-11-21T00:27:15,863 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-2137408572/meta-region-server: CHANGED 2024-11-21T00:27:15,864 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:15,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:27:15,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,44369,1732148833096 in 342 msec 2024-11-21T00:27:15,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:27:15,872 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 548 msec 2024-11-21T00:27:15,873 DEBUG [PEWorker-5 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:15,873 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:27:15,874 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:15,874 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44369,1732148833096, seqNum=-1] 2024-11-21T00:27:15,875 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:15,876 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-34-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60069, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:15,886 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2040 sec 2024-11-21T00:27:15,887 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148835886, completionTime=-1 
2024-11-21T00:27:15,887 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:27:15,887 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:27:15,890 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:27:15,890 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148895890 2024-11-21T00:27:15,890 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148955890 2024-11-21T00:27:15,890 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 3 msec 2024-11-21T00:27:15,891 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41951,1732148832855-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,891 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41951,1732148832855-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,891 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41951,1732148832855-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,891 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:41951, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,891 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,892 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,895 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.462sec 2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41951,1732148832855-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:15,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41951,1732148832855-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:27:15,916 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:27:15,916 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:27:15,916 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,41951,1732148832855-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:15,961 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27da5f12, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:15,961 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,41951,-1 for getting cluster id 2024-11-21T00:27:15,961 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:15,963 DEBUG [HMaster-EventLoopGroup-33-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fa60625d-922e-41d1-b9e8-6909057e9209' 2024-11-21T00:27:15,964 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:15,964 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fa60625d-922e-41d1-b9e8-6909057e9209" 2024-11-21T00:27:15,964 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@260a8e54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:15,964 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,41951,-1] 2024-11-21T00:27:15,965 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:15,965 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:15,966 INFO [HMaster-EventLoopGroup-33-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:15,967 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e900bfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:15,967 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:15,968 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44369,1732148833096, seqNum=-1] 2024-11-21T00:27:15,969 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:15,970 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-34-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45148, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:15,972 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:15,973 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:15,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:16,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.log.dir so I do NOT create it in target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167 2024-11-21T00:27:16,013 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:27:16,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.tmp.dir so I do NOT create it in target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167 2024-11-21T00:27:16,013 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.tmp.dir Erasing configuration value by system value. 
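[Editor's sketch] The client activity above resolves the cluster id through the connection registry and then fetches the hbase:meta region location just before "Minicluster is up". A hedged sketch of how a test could do the same against HBaseTestingUtil (the class named in these logs; on 2.x branches it is HBaseTestingUtility, and the exact method surface can differ between branches):

    import org.apache.hadoop.hbase.HBaseTestingUtil;   // HBaseTestingUtility on 2.x branches
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();   // one master, one regionserver by default
        try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Roughly what the "fetched meta region location" line above reports.
          HRegionLocation meta = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + meta.getServerName());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }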
2024-11-21T00:27:16,013 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167 2024-11-21T00:27:16,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:27:16,013 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/cluster_86413a8d-8fa4-d205-de8d-389e28773bd9, deleteOnExit=true 2024-11-21T00:27:16,013 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:27:16,016 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015ac95b3a0005 connected 2024-11-21T00:27:16,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/test.cache.data in system properties and HBase conf 2024-11-21T00:27:16,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:27:16,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:27:16,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:27:16,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:27:16,017 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:27:16,017 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:27:16,018 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:16,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:27:16,019 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:27:16,486 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:16,491 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:27:16,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:27:16,544 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:27:16,544 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:27:16,549 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:16,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fec52ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:27:16,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dcb40b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:27:16,683 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@141afe7a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/java.io.tmpdir/jetty-localhost-41299-hadoop-hdfs-3_4_1-tests_jar-_-any-7061665653568242670/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:27:16,685 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f702d45{HTTP/1.1, (http/1.1)}{localhost:41299} 2024-11-21T00:27:16,685 INFO [Time-limited test {}] server.Server(415): Started @571235ms 2024-11-21T00:27:17,240 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:17,255 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:27:17,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:27:17,308 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:27:17,308 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:27:17,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70d4617e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:27:17,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79874c3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:27:17,451 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2adf0652{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/java.io.tmpdir/jetty-localhost-45791-hadoop-hdfs-3_4_1-tests_jar-_-any-754466243856604188/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:27:17,452 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f055b0d{HTTP/1.1, (http/1.1)}{localhost:45791} 2024-11-21T00:27:17,452 INFO [Time-limited test {}] server.Server(415): Started @572001ms 2024-11-21T00:27:17,454 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:27:18,769 WARN [Thread-2950 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/cluster_86413a8d-8fa4-d205-de8d-389e28773bd9/data/data2/current/BP-651598342-172.17.0.2-1732148836043/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:18,772 WARN [Thread-2949 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/cluster_86413a8d-8fa4-d205-de8d-389e28773bd9/data/data1/current/BP-651598342-172.17.0.2-1732148836043/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:18,797 WARN [Thread-2937 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:27:18,805 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc60796dc054fdbce with lease ID 0x4b7e4117715ccb97: Processing first storage report for DS-1b285dae-a468-4813-ae49-fa9fd5915b2b from datanode DatanodeRegistration(127.0.0.1:38013, datanodeUuid=47717199-df6d-4f3b-80f3-c504122bf7c4, infoPort=43027, infoSecurePort=0, ipcPort=35983, storageInfo=lv=-57;cid=testClusterID;nsid=367815214;c=1732148836043) 2024-11-21T00:27:18,805 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc60796dc054fdbce with lease ID 0x4b7e4117715ccb97: from storage DS-1b285dae-a468-4813-ae49-fa9fd5915b2b node DatanodeRegistration(127.0.0.1:38013, datanodeUuid=47717199-df6d-4f3b-80f3-c504122bf7c4, infoPort=43027, infoSecurePort=0, ipcPort=35983, storageInfo=lv=-57;cid=testClusterID;nsid=367815214;c=1732148836043), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T00:27:18,805 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc60796dc054fdbce with lease ID 0x4b7e4117715ccb97: Processing first storage report for DS-dfef576e-c39d-483a-99f9-3b3e2934b4e0 from datanode DatanodeRegistration(127.0.0.1:38013, datanodeUuid=47717199-df6d-4f3b-80f3-c504122bf7c4, infoPort=43027, infoSecurePort=0, ipcPort=35983, storageInfo=lv=-57;cid=testClusterID;nsid=367815214;c=1732148836043) 2024-11-21T00:27:18,805 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc60796dc054fdbce with lease ID 0x4b7e4117715ccb97: from storage DS-dfef576e-c39d-483a-99f9-3b3e2934b4e0 node DatanodeRegistration(127.0.0.1:38013, datanodeUuid=47717199-df6d-4f3b-80f3-c504122bf7c4, infoPort=43027, infoSecurePort=0, ipcPort=35983, storageInfo=lv=-57;cid=testClusterID;nsid=367815214;c=1732148836043), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:27:18,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167 
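[Editor's sketch] The BlockPoolSlice warnings and block reports above come from the single-datanode HDFS that the test utility starts for the second cluster ("STARTING DFS" earlier), using MiniDFSCluster from the hadoop-hdfs tests artifact referenced in the Jetty lines. A minimal sketch of starting such an in-process HDFS directly; the probe path and file contents are illustrative, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // One namenode plus one datanode, like numDataNodes=1 in the minicluster option above.
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          FileSystem fs = dfs.getFileSystem();
          Path p = new Path("/probe");                       // illustrative path
          try (FSDataOutputStream out = fs.create(p)) {
            out.writeBytes("hello");                         // writing data allocates a block
          }
          System.out.println("exists: " + fs.exists(p));
        } finally {
          dfs.shutdown();
        }
      }
    }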
2024-11-21T00:27:18,807 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:18,808 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:18,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:27:19,231 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e with version=8 2024-11-21T00:27:19,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/hbase-staging 2024-11-21T00:27:19,233 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:19,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:19,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:19,233 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:19,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:19,233 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:19,233 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:27:19,233 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:19,234 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46151 2024-11-21T00:27:19,235 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46151 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:19,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461510x0, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:19,246 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46151-0x1015ac95b3a0006 connected 2024-11-21T00:27:19,726 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
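[Editor's sketch] The second master above is brought up with hbase.rootdir pointing at the fresh HDFS (hdfs://localhost:45089/...), hbase.cluster.distributed=false, hbase.fs.tmp.dir under the same rootdir, and a ZooKeeper ensemble at 127.0.0.1:50082. A sketch of assembling that kind of Configuration by hand, with the values copied from the log lines; any real deployment would substitute its own paths and quorum.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ClusterConfSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the log lines above.
        conf.set("hbase.rootdir",
            "hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e");
        conf.setBoolean("hbase.cluster.distributed", false);
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 50082);
        conf.set("hbase.fs.tmp.dir",
            "hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/hbase-staging");
        System.out.println("rootdir = " + conf.get("hbase.rootdir"));
      }
    }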
2024-11-21T00:27:19,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:19,729 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on znode that does not yet exist, /2-559595819/running 2024-11-21T00:27:19,729 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e, hbase.cluster.distributed=false 2024-11-21T00:27:19,731 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on znode that does not yet exist, /2-559595819/acl 2024-11-21T00:27:19,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46151 2024-11-21T00:27:19,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46151 2024-11-21T00:27:19,740 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46151 2024-11-21T00:27:19,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46151 2024-11-21T00:27:19,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46151 2024-11-21T00:27:19,767 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:19,768 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:19,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:19,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:27:19,768 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:19,796 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33687 2024-11-21T00:27:19,797 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:33687 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:19,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:19,800 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:19,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336870x0, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:19,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:336870x0, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on znode that does not yet exist, /2-559595819/running 2024-11-21T00:27:19,863 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33687-0x1015ac95b3a0007 connected 2024-11-21T00:27:19,863 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:27:19,864 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:27:19,864 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on znode that does not yet exist, /2-559595819/master 2024-11-21T00:27:19,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on znode that does not yet exist, /2-559595819/acl 2024-11-21T00:27:19,868 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33687 2024-11-21T00:27:19,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33687 2024-11-21T00:27:19,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33687 2024-11-21T00:27:19,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33687 2024-11-21T00:27:19,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33687 2024-11-21T00:27:19,917 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:46151 2024-11-21T00:27:19,917 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /2-559595819/backup-masters/5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:19,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819/backup-masters 2024-11-21T00:27:19,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/2-559595819/backup-masters 2024-11-21T00:27:19,930 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on existing znode=/2-559595819/backup-masters/5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:19,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:19,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/2-559595819/master 2024-11-21T00:27:19,940 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:19,940 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on existing znode=/2-559595819/master 2024-11-21T00:27:19,940 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /2-559595819/backup-masters/5ed4808ef0e6,46151,1732148839233 from backup master directory 2024-11-21T00:27:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819/backup-masters 2024-11-21T00:27:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-559595819/backup-masters/5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:19,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819/backup-masters 2024-11-21T00:27:19,950 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
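[Editor's sketch] The watcher traffic above (NodeCreated, NodeDeleted and NodeChildrenChanged on /2-559595819/master and .../backup-masters) is ordinary ZooKeeper watch semantics: a watch can be registered with exists() even when the znode is absent, which is what the repeated "Set watcher on znode that does not yet exist" lines mean. A minimal sketch with the plain ZooKeeper client; the connection string and paths are copied from the log, while the session timeout is an assumption of the example.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());

        // 30s session timeout is an illustrative choice, not taken from the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:50082", 30_000, watcher);

        // exists() registers the watch even if the znode is not there yet, so a later
        // create fires NodeCreated (as seen above for /2-559595819/master and /running).
        zk.exists("/2-559595819/master", watcher);
        zk.exists("/2-559595819/running", watcher);

        Thread.sleep(5_000);   // wait briefly for events in this toy example
        zk.close();
      }
    }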
2024-11-21T00:27:19,950 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:19,954 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/hbase.id] with ID: 46c96938-ca9c-4723-82fe-8a99d3a89209 2024-11-21T00:27:19,954 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/.tmp/hbase.id 2024-11-21T00:27:19,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:27:19,963 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/.tmp/hbase.id]:[hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/hbase.id] 2024-11-21T00:27:19,972 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:19,972 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:27:19,973 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:27:19,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:19,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:19,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:27:20,142 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
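[Editor's sketch] The cluster id lines above (FSUtils writing hbase.id via a temporary file and a move) follow the usual HDFS pattern of writing to a temporary path and renaming it into place so readers never observe a partial file. A hedged sketch of that pattern with the plain FileSystem API; the paths are illustrative, and where the real hbase.id holds a serialized ClusterId, a bare UUID string stands in here.

    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:45089");   // namenode from the log
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/example/.tmp/hbase.id");   // illustrative
        Path target = new Path("/user/jenkins/test-data/example/hbase.id");     // illustrative

        // Write the id to a temporary location first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeBytes(UUID.randomUUID().toString());
        }
        // ...then move it to where readers look for it.
        if (!fs.rename(tmp, target)) {
          throw new RuntimeException("rename failed: " + tmp + " -> " + target);
        }
      }
    }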
2024-11-21T00:27:20,387 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:20,388 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:27:20,389 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:20,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:27:20,395 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store 2024-11-21T00:27:20,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:27:20,400 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:20,400 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:27:20,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:20,400 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:20,400 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:27:20,400 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:20,400 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
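[Editor's sketch] The master local region above is created from a descriptor with four column families (info, proc, rs, state) whose attributes, such as VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE, are spelled out in the dump. A hedged sketch of declaring a comparable schema with the public descriptor builders; the table name is invented for the example, only a subset of attributes is set, and this is not how the master actually constructs its internal 'master:store' region.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreSchemaSketch {
      public static void main(String[] args) {
        // 'info' mirrors the attributes in the log: 3 versions, in-memory,
        // ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store"))          // illustrative name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024)
                .build())
            // 'proc', 'rs' and 'state' keep defaults in the log: 1 version, ROW bloom, 64 KB blocks.
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
        System.out.println(desc);
      }
    }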
2024-11-21T00:27:20,400 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148840400Disabling compacts and flushes for region at 1732148840400Disabling writes for close at 1732148840400Writing region close event to WAL at 1732148840400Closed at 1732148840400 2024-11-21T00:27:20,401 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/.initializing 2024-11-21T00:27:20,401 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/WALs/5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:20,402 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:20,405 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C46151%2C1732148839233, suffix=, logDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/WALs/5ed4808ef0e6,46151,1732148839233, archiveDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/oldWALs, maxLogs=10 2024-11-21T00:27:20,421 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/WALs/5ed4808ef0e6,46151,1732148839233/5ed4808ef0e6%2C46151%2C1732148839233.1732148840405, exclude list is [], retry=0 2024-11-21T00:27:20,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38013,DS-1b285dae-a468-4813-ae49-fa9fd5915b2b,DISK] 2024-11-21T00:27:20,430 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/WALs/5ed4808ef0e6,46151,1732148839233/5ed4808ef0e6%2C46151%2C1732148839233.1732148840405 2024-11-21T00:27:20,432 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43027:43027)] 2024-11-21T00:27:20,432 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:20,432 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:20,433 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,433 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,434 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:27:20,435 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:20,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:20,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,436 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:27:20,436 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:20,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:20,437 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:27:20,439 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:20,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:20,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:27:20,441 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:20,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:20,441 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,442 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,442 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:20,444 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:20,446 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:20,446 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69522055, jitterRate=0.03595934808254242}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:20,446 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148840433Initializing all the Stores at 1732148840433Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148840433Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148840434 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148840434Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148840434Cleaning up temporary data from old regions at 1732148840443 (+9 ms)Region opened successfully at 1732148840446 (+3 ms) 2024-11-21T00:27:20,446 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:27:20,450 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ddc2987, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:20,450 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:27:20,450 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:27:20,450 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:27:20,451 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:27:20,451 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:27:20,451 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:27:20,451 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:27:20,453 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
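[Editor's sketch] The procedure executor above reports "Starting 5 core workers ... with max (burst) worker count=50", and the dispatcher a queueMaxSize of 32. Purely as a conceptual illustration (HBase's ProcedureExecutor manages its own worker threads rather than delegating to a ThreadPoolExecutor), a core-plus-burst pool in plain Java looks like the sketch below; extra threads beyond the core are only spawned once the bounded submission queue is full, which is why the queue is capped.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class BurstPoolSketch {
      public static void main(String[] args) {
        // 5 core threads, bursting to 50 when the bounded queue (capacity 32) backs up;
        // idle burst threads are reclaimed after 60s. Counts mirror the log; the
        // 60s keep-alive and CallerRunsPolicy are choices of this example.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            5, 50, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<>(32),
            new ThreadPoolExecutor.CallerRunsPolicy());   // run in caller instead of rejecting

        for (int i = 0; i < 100; i++) {
          final int id = i;
          pool.execute(() ->
              System.out.println("task " + id + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
      }
    }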
2024-11-21T00:27:20,453 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Unable to get data of znode /2-559595819/balancer because node does not exist (not necessarily an error) 2024-11-21T00:27:20,462 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-559595819/balancer already deleted, retry=false 2024-11-21T00:27:20,463 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:27:20,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Unable to get data of znode /2-559595819/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:27:20,484 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-559595819/normalizer already deleted, retry=false 2024-11-21T00:27:20,484 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:27:20,485 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Unable to get data of znode /2-559595819/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:27:20,500 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-559595819/switch/split already deleted, retry=false 2024-11-21T00:27:20,501 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Unable to get data of znode /2-559595819/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:27:20,508 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-559595819/switch/merge already deleted, retry=false 2024-11-21T00:27:20,511 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Unable to get data of znode /2-559595819/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:27:20,519 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-559595819/snapshot-cleanup already deleted, retry=false 2024-11-21T00:27:20,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/2-559595819/running 2024-11-21T00:27:20,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:20,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/2-559595819/running 2024-11-21T00:27:20,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:20,530 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,46151,1732148839233, sessionid=0x1015ac95b3a0006, setting cluster-up flag (Was=false) 2024-11-21T00:27:20,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:20,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:20,584 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /2-559595819/flush-table-proc/acquired, /2-559595819/flush-table-proc/reached, /2-559595819/flush-table-proc/abort 2024-11-21T00:27:20,585 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:20,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:20,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:20,635 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /2-559595819/online-snapshot/acquired, /2-559595819/online-snapshot/reached, /2-559595819/online-snapshot/abort 2024-11-21T00:27:20,637 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:20,639 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:27:20,641 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:20,641 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:27:20,641 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , 
sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:27:20,642 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,46151,1732148839233 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:27:20,644 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:20,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:20,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:20,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:20,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:27:20,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:20,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,657 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:20,658 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:27:20,659 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:20,660 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE 
=> '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:27:20,677 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148870676 2024-11-21T00:27:20,677 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:27:20,677 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:27:20,677 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:27:20,677 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:27:20,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:27:20,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:27:20,680 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:20,684 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:27:20,684 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:27:20,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:27:20,684 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:27:20,685 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:27:20,688 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:27:20,688 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:27:20,692 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148840688,5,FailOnTimeoutGroup] 2024-11-21T00:27:20,696 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148840692,5,FailOnTimeoutGroup] 2024-11-21T00:27:20,696 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,696 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:27:20,696 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,697 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
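Each "Chore ScheduledChore name=..., period=N, unit=MILLISECONDS is enabled" entry above records a periodic task registration. As a rough sketch in plain JDK terms (not HBase's own ChoreService/ScheduledChore API; the task body is a placeholder), a task with the 600000 ms period logged for LogsCleaner and HFileCleaner could be scheduled like this:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChorePeriodSketch {
    public static void main(String[] args) {
        // Stand-in for a chore with period=600000 ms (10 minutes), as logged
        // for LogsCleaner and HFileCleaner; the printed message is a placeholder.
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("cleaner chore tick"),
                0, 600_000, TimeUnit.MILLISECONDS);
    }
}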
2024-11-21T00:27:20,697 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(746): ClusterId : 46c96938-ca9c-4723-82fe-8a99d3a89209 2024-11-21T00:27:20,697 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:27:20,709 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:27:20,709 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:27:20,719 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:27:20,720 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@236e732d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:20,734 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:33687 2024-11-21T00:27:20,734 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:27:20,734 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:27:20,734 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-21T00:27:20,735 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,46151,1732148839233 with port=33687, startcode=1732148839767 2024-11-21T00:27:20,735 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:27:20,736 INFO [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33629, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:27:20,737 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46151 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:20,737 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46151 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:20,738 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e 2024-11-21T00:27:20,738 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45089 2024-11-21T00:27:20,738 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:27:20,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819/rs 2024-11-21T00:27:20,780 DEBUG [RS:0;5ed4808ef0e6:33687 {}] zookeeper.ZKUtil(111): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set 
watcher on existing znode=/2-559595819/rs/5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:20,780 WARN [RS:0;5ed4808ef0e6:33687 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:27:20,780 INFO [RS:0;5ed4808ef0e6:33687 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:20,780 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:20,780 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,33687,1732148839767] 2024-11-21T00:27:20,783 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:27:20,784 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:27:20,785 INFO [RS:0;5ed4808ef0e6:33687 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:27:20,785 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,788 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:27:20,789 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:27:20,789 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:20,789 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:20,790 DEBUG [RS:0;5ed4808ef0e6:33687 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:20,800 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,800 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,800 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,800 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
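The executor.ExecutorService entries above each record a corePoolSize/maxPoolSize pair (e.g. RS_OPEN_REGION 1/1, RS_SNAPSHOT_OPERATIONS 3/3). As a hedged sketch using only the JDK (not HBase's org.apache.hadoop.hbase.executor.ExecutorService, and with an assumed keep-alive value), a fixed-size pool of that shape looks like:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorPoolSketch {
    public static void main(String[] args) {
        // corePoolSize == maximumPoolSize, as in the RS_OPEN_REGION / RS_CLOSE_REGION
        // pools above; the 60-second keep-alive is an assumed value, not from the log.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
                1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        openRegionPool.allowCoreThreadTimeOut(true); // let the idle core thread exit
        openRegionPool.execute(() -> System.out.println("open-region task"));
        openRegionPool.shutdown();
    }
}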
2024-11-21T00:27:20,800 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,800 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33687,1732148839767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:20,818 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:27:20,818 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,33687,1732148839767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,818 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,818 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.Replication(171): 5ed4808ef0e6,33687,1732148839767 started 2024-11-21T00:27:20,836 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:20,836 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,33687,1732148839767, RpcServer on 5ed4808ef0e6/172.17.0.2:33687, sessionid=0x1015ac95b3a0007 2024-11-21T00:27:20,836 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:27:20,836 DEBUG [RS:0;5ed4808ef0e6:33687 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:20,836 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33687,1732148839767' 2024-11-21T00:27:20,836 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/2-559595819/flush-table-proc/abort' 2024-11-21T00:27:20,837 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/2-559595819/flush-table-proc/acquired' 2024-11-21T00:27:20,837 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:27:20,837 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:27:20,837 DEBUG [RS:0;5ed4808ef0e6:33687 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:20,837 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,33687,1732148839767' 2024-11-21T00:27:20,837 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/2-559595819/online-snapshot/abort' 2024-11-21T00:27:20,840 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/2-559595819/online-snapshot/acquired' 2024-11-21T00:27:20,840 DEBUG [RS:0;5ed4808ef0e6:33687 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:27:20,840 INFO [RS:0;5ed4808ef0e6:33687 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:27:20,840 INFO [RS:0;5ed4808ef0e6:33687 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:27:20,941 INFO [RS:0;5ed4808ef0e6:33687 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:20,942 INFO [RS:0;5ed4808ef0e6:33687 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33687%2C1732148839767, suffix=, logDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767, archiveDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/oldWALs, maxLogs=10 2024-11-21T00:27:20,960 DEBUG [RS:0;5ed4808ef0e6:33687 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, exclude list is [], retry=0 2024-11-21T00:27:20,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38013,DS-1b285dae-a468-4813-ae49-fa9fd5915b2b,DISK] 2024-11-21T00:27:20,964 INFO [RS:0;5ed4808ef0e6:33687 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 2024-11-21T00:27:20,964 DEBUG [RS:0;5ed4808ef0e6:33687 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43027:43027)] 2024-11-21T00:27:21,086 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:27:21,086 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e 2024-11-21T00:27:21,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:27:21,234 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:21,320 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:27:21,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,385 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,395 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,402 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:21,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:21,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:21,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:21,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:21,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:21,515 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:21,517 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:21,517 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:21,518 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:21,518 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:21,519 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740 2024-11-21T00:27:21,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740 
2024-11-21T00:27:21,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:21,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:27:21,521 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:21,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:21,525 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:21,525 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72802052, jitterRate=0.08483511209487915}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:21,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148841509Initializing all the Stores at 1732148841511 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148841511Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148841511Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148841511Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148841511Cleaning up temporary data from old regions at 1732148841521 (+10 ms)Region opened successfully at 1732148841525 (+4 ms) 2024-11-21T00:27:21,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:21,525 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:21,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:27:21,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:21,526 
DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:21,527 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:21,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148841525Disabling compacts and flushes for region at 1732148841525Disabling writes for close at 1732148841526 (+1 ms)Writing region close event to WAL at 1732148841526Closed at 1732148841527 (+1 ms) 2024-11-21T00:27:21,528 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:21,528 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:27:21,528 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:27:21,529 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:21,531 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:27:21,681 DEBUG [5ed4808ef0e6:46151 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:27:21,681 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:21,684 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33687,1732148839767, state=OPENING 2024-11-21T00:27:21,695 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:27:21,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:21,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:21,709 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:21,709 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-559595819/meta-region-server: CHANGED 2024-11-21T00:27:21,709 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-559595819/meta-region-server: CHANGED 2024-11-21T00:27:21,709 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33687,1732148839767}] 2024-11-21T00:27:21,861 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:21,862 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:27:21,865 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:27:21,865 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:21,865 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:27:21,866 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33687%2C1732148839767.meta, suffix=.meta, logDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767, archiveDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/oldWALs, maxLogs=10 2024-11-21T00:27:21,880 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.meta.1732148841866.meta, exclude list is [], retry=0 2024-11-21T00:27:21,882 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38013,DS-1b285dae-a468-4813-ae49-fa9fd5915b2b,DISK] 2024-11-21T00:27:21,883 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.meta.1732148841866.meta 2024-11-21T00:27:21,883 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43027:43027)] 2024-11-21T00:27:21,883 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:21,883 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
2024-11-21T00:27:21,884 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:21,884 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:27:21,884 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:27:21,884 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:27:21,884 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:21,884 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:27:21,884 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:27:21,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:21,890 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:21,890 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,891 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:21,891 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:21,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:21,892 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:21,892 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:21,893 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:21,893 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:21,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:21,893 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:21,896 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740 2024-11-21T00:27:21,897 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740 2024-11-21T00:27:21,898 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:21,898 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:27:21,899 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
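The FlushLargeStoresPolicy entry above derives its 32.0 M lower bound from the region memstore flush size divided by the number of column families. A small check of that arithmetic, assuming the default 128 MB (134217728-byte) hbase.hregion.memstore.flush.size and the four hbase:meta families (info, ns, rep_barrier, table) listed earlier in this log:

public class FlushLowerBoundCheck {
    public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L; // assumed default region flush size (128 MB)
        int columnFamilies = 4;                // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushSize / columnFamilies;
        // Prints 33554432, matching flushSizeLowerBound=33554432 (32.0 M) in the log.
        System.out.println(lowerBound);
    }
}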
2024-11-21T00:27:21,900 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:21,900 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60494132, jitterRate=-0.0985671877861023}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:21,900 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:27:21,901 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148841884Writing region info on filesystem at 1732148841884Initializing all the Stores at 1732148841888 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148841888Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148841889 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148841889Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148841889Cleaning up temporary data from old regions at 1732148841898 (+9 ms)Running coprocessor post-open hooks at 1732148841900 (+2 ms)Region opened successfully at 1732148841900 2024-11-21T00:27:21,901 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148841860 2024-11-21T00:27:21,903 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:27:21,903 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:27:21,903 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:21,904 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,33687,1732148839767, state=OPEN 2024-11-21T00:27:21,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/2-559595819/meta-region-server 2024-11-21T00:27:21,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/2-559595819/meta-region-server 2024-11-21T00:27:21,915 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-559595819/meta-region-server: CHANGED 2024-11-21T00:27:21,915 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-559595819/meta-region-server: CHANGED 2024-11-21T00:27:21,915 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:21,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:27:21,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,33687,1732148839767 in 206 msec 2024-11-21T00:27:21,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:27:21,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 389 msec 2024-11-21T00:27:21,919 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:21,919 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:27:21,920 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:21,921 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33687,1732148839767, seqNum=-1] 2024-11-21T00:27:21,921 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:21,922 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39815, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:21,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.3120 sec 2024-11-21T00:27:21,961 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148841961, completionTime=-1 
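With hbase:meta now OPEN and its location published under /2-559595819/meta-region-server, clients resolve the meta region through the connection registry exactly as the ConnectionUtils lines above report. A rough client-side equivalent using only the public API (illustrative only, not part of the test code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Resolves to something like [region=hbase:meta,,1.1588230740,
      // hostname=5ed4808ef0e6,33687,1732148839767, seqNum=-1] as logged above.
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      System.out.println(loc);
    }
  }
}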
2024-11-21T00:27:21,961 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:27:21,961 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148901964 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148961964 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46151,1732148839233-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46151,1732148839233-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46151,1732148839233-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:46151, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:21,964 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:21,966 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:27:21,972 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:21,980 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.030sec 2024-11-21T00:27:21,980 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:27:21,980 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:27:21,980 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:27:21,980 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:27:21,980 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:27:21,981 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46151,1732148839233-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:21,981 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46151,1732148839233-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:27:21,998 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:27:21,998 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:27:21,998 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,46151,1732148839233-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:22,000 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75067113, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:22,001 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:22,001 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:22,002 DEBUG [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:22,002 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:22,003 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:22,003 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76c87e40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:22,003 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,46151,-1] 2024-11-21T00:27:22,003 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:22,003 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:22,004 INFO [HMaster-EventLoopGroup-35-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35880, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:22,005 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d42760c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:22,006 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:22,007 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33687,1732148839767, seqNum=-1] 2024-11-21T00:27:22,008 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:22,010 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46042, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:22,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:22,013 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster2 connecting to ZooKeeper ensemble=127.0.0.1:50082 2024-11-21T00:27:22,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster20x0, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:22,032 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster2-0x1015ac95b3a0008 connected 2024-11-21T00:27:22,043 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:22,044 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:22,044 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5291bfbd 2024-11-21T00:27:22,044 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:22,045 INFO [HMaster-EventLoopGroup-31-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53690, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:22,046 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:22,047 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:27:22,048 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:22,048 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:22,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:27:22,049 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:22,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:22,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:27:22,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:22,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:22,460 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 69eb84528d7bfcc8bfbb1997d42d5d19, NAME => 'test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b 2024-11-21T00:27:22,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:27:22,470 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 
; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:22,470 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing 69eb84528d7bfcc8bfbb1997d42d5d19, disabling compactions & flushes 2024-11-21T00:27:22,470 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:22,470 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:22,470 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. after waiting 0 ms 2024-11-21T00:27:22,470 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:22,470 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:22,470 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for 69eb84528d7bfcc8bfbb1997d42d5d19: Waiting for close lock at 1732148842470Disabling compacts and flushes for region at 1732148842470Disabling writes for close at 1732148842470Writing region close event to WAL at 1732148842470Closed at 1732148842470 2024-11-21T00:27:22,472 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:22,472 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148842472"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148842472"}]},"ts":"1732148842472"} 2024-11-21T00:27:22,474 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
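The HMaster$4(2454) entry above is the client-issued create of table 'test' with three column families, where 'f' and 'f1' carry REPLICATION_SCOPE => '1' and 'norep' stays at scope 0; the CreateTableProcedure then writes the FS layout, adds the region to hbase:meta, and assigns it. For reference, an equivalent creation via the standard Java Admin API would look roughly like this (connection settings omitted; only the descriptor mirrors the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // 'f' and 'f1' are replicated (scope 1), 'norep' is not (scope 0), all with VERSIONS => 1.
      TableDescriptorBuilder test = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
              .setMaxVersions(1).setScope(1).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
              .setMaxVersions(1).setScope(1).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
              .setMaxVersions(1).setScope(0).build());
      admin.createTable(test.build());
    }
  }
}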
2024-11-21T00:27:22,474 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:22,475 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148842474"}]},"ts":"1732148842474"} 2024-11-21T00:27:22,476 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:27:22,477 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=69eb84528d7bfcc8bfbb1997d42d5d19, ASSIGN}] 2024-11-21T00:27:22,478 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=69eb84528d7bfcc8bfbb1997d42d5d19, ASSIGN 2024-11-21T00:27:22,478 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=69eb84528d7bfcc8bfbb1997d42d5d19, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,33853,1732148826420; forceNewPlan=false, retain=false 2024-11-21T00:27:22,629 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=69eb84528d7bfcc8bfbb1997d42d5d19, regionState=OPENING, regionLocation=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:22,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=69eb84528d7bfcc8bfbb1997d42d5d19, ASSIGN because future has completed 2024-11-21T00:27:22,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69eb84528d7bfcc8bfbb1997d42d5d19, server=5ed4808ef0e6,33853,1732148826420}] 2024-11-21T00:27:22,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:22,788 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:22,788 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 69eb84528d7bfcc8bfbb1997d42d5d19, NAME => 'test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:22,789 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:22,789 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
2024-11-21T00:27:22,789 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,789 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:22,789 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,789 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,790 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,791 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69eb84528d7bfcc8bfbb1997d42d5d19 columnFamilyName f 2024-11-21T00:27:22,791 DEBUG [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:22,792 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] regionserver.HStore(327): Store=69eb84528d7bfcc8bfbb1997d42d5d19/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:22,792 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,793 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69eb84528d7bfcc8bfbb1997d42d5d19 columnFamilyName f1 2024-11-21T00:27:22,793 DEBUG [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:22,793 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] regionserver.HStore(327): Store=69eb84528d7bfcc8bfbb1997d42d5d19/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:22,793 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,794 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69eb84528d7bfcc8bfbb1997d42d5d19 columnFamilyName norep 2024-11-21T00:27:22,794 DEBUG [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:22,794 INFO [StoreOpener-69eb84528d7bfcc8bfbb1997d42d5d19-1 {}] regionserver.HStore(327): Store=69eb84528d7bfcc8bfbb1997d42d5d19/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:22,795 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,795 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,795 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,796 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,796 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,796 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:27:22,798 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,802 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:22,802 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 69eb84528d7bfcc8bfbb1997d42d5d19; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65754438, jitterRate=-0.020182520151138306}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:27:22,802 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:22,803 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 69eb84528d7bfcc8bfbb1997d42d5d19: Running coprocessor pre-open hook at 1732148842789Writing region info on filesystem at 1732148842789Initializing all the Stores at 1732148842790 (+1 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148842790Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148842790Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148842790Cleaning up temporary data from old regions at 1732148842796 (+6 ms)Running coprocessor post-open hooks at 1732148842802 (+6 ms)Region opened successfully at 1732148842803 (+1 ms) 2024-11-21T00:27:22,803 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19., pid=6, 
masterSystemTime=1732148842784 2024-11-21T00:27:22,805 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:22,805 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:22,806 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=69eb84528d7bfcc8bfbb1997d42d5d19, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:22,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69eb84528d7bfcc8bfbb1997d42d5d19, server=5ed4808ef0e6,33853,1732148826420 because future has completed 2024-11-21T00:27:22,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:27:22,812 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 69eb84528d7bfcc8bfbb1997d42d5d19, server=5ed4808ef0e6,33853,1732148826420 in 178 msec 2024-11-21T00:27:22,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:27:22,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=69eb84528d7bfcc8bfbb1997d42d5d19, ASSIGN in 335 msec 2024-11-21T00:27:22,818 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:22,818 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148842818"}]},"ts":"1732148842818"} 2024-11-21T00:27:22,820 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:27:22,821 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:22,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 775 msec 2024-11-21T00:27:23,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:23,188 INFO [RPCClient-NioEventLoopGroup-4-16 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:27:23,188 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:23,189 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:23,189 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@352b41c3 2024-11-21T00:27:23,189 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:23,190 INFO [HMaster-EventLoopGroup-33-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50282, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:23,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:23,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:27:23,194 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:23,194 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:23,194 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:27:23,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:23,200 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:23,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:27:23,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:23,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:23,653 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => fa18a0bdbdf880445723239afb964dd9, NAME => 
'test,,1732148843191.fa18a0bdbdf880445723239afb964dd9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea 2024-11-21T00:27:23,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:27:23,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:24,083 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148843191.fa18a0bdbdf880445723239afb964dd9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:24,083 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing fa18a0bdbdf880445723239afb964dd9, disabling compactions & flushes 2024-11-21T00:27:24,083 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:24,083 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:24,083 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. after waiting 0 ms 2024-11-21T00:27:24,083 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:24,083 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 
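From the RpcServer handler on port 41951 onward, the test repeats the identical CREATE TABLE against a second mini-cluster master, since a master-master replication test needs the same 'test' descriptor on every cluster in the chain. Below is a hedged sketch of checking the table on such a peer cluster by pointing a client Configuration at that cluster's ZooKeeper; the quorum, client port, and znode parent shown are illustrative values lifted from earlier log lines, not the test's actual wiring.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class PeerClusterCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration peerConf = HBaseConfiguration.create();
    // Illustrative values only: the ensemble and base znode below appear earlier in the log
    // for one of the mini-clusters; substitute the peer cluster's real settings.
    peerConf.set("hbase.zookeeper.quorum", "127.0.0.1");
    peerConf.setInt("hbase.zookeeper.property.clientPort", 50082);
    peerConf.set("zookeeper.znode.parent", "/2-559595819");

    try (Connection conn = ConnectionFactory.createConnection(peerConf);
         Admin admin = conn.getAdmin()) {
      System.out.println("'test' exists on peer: " + admin.tableExists(TableName.valueOf("test")));
    }
  }
}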
2024-11-21T00:27:24,083 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for fa18a0bdbdf880445723239afb964dd9: Waiting for close lock at 1732148844083Disabling compacts and flushes for region at 1732148844083Disabling writes for close at 1732148844083Writing region close event to WAL at 1732148844083Closed at 1732148844083 2024-11-21T00:27:24,086 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:24,086 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148843191.fa18a0bdbdf880445723239afb964dd9.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148844086"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148844086"}]},"ts":"1732148844086"} 2024-11-21T00:27:24,090 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:27:24,092 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:24,092 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148844092"}]},"ts":"1732148844092"} 2024-11-21T00:27:24,096 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:27:24,096 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=fa18a0bdbdf880445723239afb964dd9, ASSIGN}] 2024-11-21T00:27:24,099 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=fa18a0bdbdf880445723239afb964dd9, ASSIGN 2024-11-21T00:27:24,100 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=fa18a0bdbdf880445723239afb964dd9, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,44369,1732148833096; forceNewPlan=false, retain=false 2024-11-21T00:27:24,251 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fa18a0bdbdf880445723239afb964dd9, regionState=OPENING, regionLocation=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:24,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=fa18a0bdbdf880445723239afb964dd9, ASSIGN because future has completed 2024-11-21T00:27:24,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa18a0bdbdf880445723239afb964dd9, server=5ed4808ef0e6,44369,1732148833096}] 2024-11-21T00:27:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:24,408 INFO 
[RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:24,408 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => fa18a0bdbdf880445723239afb964dd9, NAME => 'test,,1732148843191.fa18a0bdbdf880445723239afb964dd9.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:24,408 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:24,408 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 2024-11-21T00:27:24,409 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,409 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148843191.fa18a0bdbdf880445723239afb964dd9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:24,409 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,409 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,410 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,411 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa18a0bdbdf880445723239afb964dd9 columnFamilyName f 2024-11-21T00:27:24,411 DEBUG [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:24,411 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] regionserver.HStore(327): Store=fa18a0bdbdf880445723239afb964dd9/f, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:24,411 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,412 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa18a0bdbdf880445723239afb964dd9 columnFamilyName f1 2024-11-21T00:27:24,412 DEBUG [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:24,413 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] regionserver.HStore(327): Store=fa18a0bdbdf880445723239afb964dd9/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:24,413 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,414 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fa18a0bdbdf880445723239afb964dd9 columnFamilyName norep 2024-11-21T00:27:24,414 DEBUG [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:24,414 INFO [StoreOpener-fa18a0bdbdf880445723239afb964dd9-1 {}] regionserver.HStore(327): Store=fa18a0bdbdf880445723239afb964dd9/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:24,414 DEBUG 
[RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,415 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,415 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,416 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,416 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,416 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:27:24,417 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,419 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:24,419 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened fa18a0bdbdf880445723239afb964dd9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59358898, jitterRate=-0.11548349261283875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:27:24,419 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:24,419 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for fa18a0bdbdf880445723239afb964dd9: Running coprocessor pre-open hook at 1732148844409Writing region info on filesystem at 1732148844409Initializing all the Stores at 1732148844409Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148844410 (+1 ms)Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148844410Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148844410Cleaning up temporary data from old regions at 1732148844416 (+6 ms)Running coprocessor post-open hooks at 1732148844419 (+3 ms)Region opened successfully at 1732148844419 2024-11-21T00:27:24,420 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148843191.fa18a0bdbdf880445723239afb964dd9., pid=6, masterSystemTime=1732148844405 2024-11-21T00:27:24,422 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:24,422 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:24,422 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=fa18a0bdbdf880445723239afb964dd9, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:24,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure fa18a0bdbdf880445723239afb964dd9, server=5ed4808ef0e6,44369,1732148833096 because future has completed 2024-11-21T00:27:24,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:27:24,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure fa18a0bdbdf880445723239afb964dd9, server=5ed4808ef0e6,44369,1732148833096 in 172 msec 2024-11-21T00:27:24,428 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:27:24,428 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=fa18a0bdbdf880445723239afb964dd9, ASSIGN in 330 msec 2024-11-21T00:27:24,428 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:24,428 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148844428"}]},"ts":"1732148844428"} 2024-11-21T00:27:24,430 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:27:24,430 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:24,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 1.2390 sec 2024-11-21T00:27:25,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:25,338 INFO [RPCClient-NioEventLoopGroup-4-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:27:25,338 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:25,339 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:25,339 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6933c93f 2024-11-21T00:27:25,339 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:25,340 INFO [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35896, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:25,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:25,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:27:25,342 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:25,342 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:25,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:27:25,343 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, 
hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:25,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:25,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:27:25,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:25,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:25,754 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b1acf010e5e0a45cfda5ab304dae89d4, NAME => 'test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e 2024-11-21T00:27:25,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:27:25,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:26,159 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:26,159 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing b1acf010e5e0a45cfda5ab304dae89d4, disabling compactions & flushes 2024-11-21T00:27:26,159 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:26,159 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:26,159 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 
after waiting 0 ms 2024-11-21T00:27:26,159 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:26,159 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:26,160 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for b1acf010e5e0a45cfda5ab304dae89d4: Waiting for close lock at 1732148846159Disabling compacts and flushes for region at 1732148846159Disabling writes for close at 1732148846159Writing region close event to WAL at 1732148846159Closed at 1732148846159 2024-11-21T00:27:26,161 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:26,161 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148846161"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148846161"}]},"ts":"1732148846161"} 2024-11-21T00:27:26,162 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:27:26,163 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:26,163 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148846163"}]},"ts":"1732148846163"} 2024-11-21T00:27:26,165 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:27:26,165 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=b1acf010e5e0a45cfda5ab304dae89d4, ASSIGN}] 2024-11-21T00:27:26,166 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=b1acf010e5e0a45cfda5ab304dae89d4, ASSIGN 2024-11-21T00:27:26,167 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=b1acf010e5e0a45cfda5ab304dae89d4, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,33687,1732148839767; forceNewPlan=false, retain=false 2024-11-21T00:27:26,317 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b1acf010e5e0a45cfda5ab304dae89d4, regionState=OPENING, regionLocation=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:26,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=b1acf010e5e0a45cfda5ab304dae89d4, ASSIGN because future has completed 2024-11-21T00:27:26,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, 
ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1acf010e5e0a45cfda5ab304dae89d4, server=5ed4808ef0e6,33687,1732148839767}] 2024-11-21T00:27:26,474 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:26,475 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b1acf010e5e0a45cfda5ab304dae89d4, NAME => 'test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:26,475 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:26,475 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 2024-11-21T00:27:26,475 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,475 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:26,475 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,475 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,477 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:26,478 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b1acf010e5e0a45cfda5ab304dae89d4 columnFamilyName f 2024-11-21T00:27:26,478 DEBUG [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:26,478 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] regionserver.HStore(327): Store=b1acf010e5e0a45cfda5ab304dae89d4/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:26,478 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,479 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b1acf010e5e0a45cfda5ab304dae89d4 columnFamilyName f1 2024-11-21T00:27:26,479 DEBUG [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:26,479 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] regionserver.HStore(327): Store=b1acf010e5e0a45cfda5ab304dae89d4/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:26,479 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,480 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b1acf010e5e0a45cfda5ab304dae89d4 columnFamilyName norep 2024-11-21T00:27:26,480 DEBUG [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
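Editorial note: the CompactionConfiguration(183) entries above report the stock exploring-compaction defaults for each store (minCompactSize 128 MB, 3-10 files per compaction, ratio 1.2, off-peak ratio 5.0, weekly major compactions with 0.5 jitter). As a hedged illustration only, the sketch below restates those logged defaults through the usual HBase configuration keys; the key names are the commonly documented compaction settings and should be verified against the version in use here (3.0.0-beta-2-SNAPSHOT), and the class itself is hypothetical scaffolding, not part of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the compaction defaults reported by CompactionConfiguration(183)
// above, expressed via their usual configuration keys.
public class CompactionDefaultsSketch {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period: 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
    return conf;
  }
}
```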
2024-11-21T00:27:26,480 INFO [StoreOpener-b1acf010e5e0a45cfda5ab304dae89d4-1 {}] regionserver.HStore(327): Store=b1acf010e5e0a45cfda5ab304dae89d4/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:26,480 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,481 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,481 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,482 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,482 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,482 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
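Editorial note: the FlushLargeStoresPolicy(65) entry above records that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the 'test' table descriptor, so the policy falls back to the region memstore flush size divided by the number of families (42.7 M here). Purely as a sketch of how that fallback could be overridden, the snippet below sets the property on a table descriptor via the generic TableDescriptorBuilder.setValue call; the 16 MB figure is an arbitrary example, not a value taken from this run.

```java
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Hedged example: pin the per-column-family flush lower bound on the table
// descriptor instead of relying on the memstore-flush-size / #families fallback.
public class FlushLowerBoundSketch {
  public static TableDescriptor withLowerBound(TableDescriptor existing) {
    return TableDescriptorBuilder.newBuilder(existing)
        // Property name taken verbatim from the log entry above; 16 MB is an
        // arbitrary illustrative value.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
        .build();
  }
}
```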
2024-11-21T00:27:26,483 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,485 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:26,485 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b1acf010e5e0a45cfda5ab304dae89d4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60824548, jitterRate=-0.09364360570907593}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:27:26,485 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:26,485 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b1acf010e5e0a45cfda5ab304dae89d4: Running coprocessor pre-open hook at 1732148846476Writing region info on filesystem at 1732148846476Initializing all the Stores at 1732148846476Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148846476Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148846476Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148846476Cleaning up temporary data from old regions at 1732148846482 (+6 ms)Running coprocessor post-open hooks at 1732148846485 (+3 ms)Region opened successfully at 1732148846485 2024-11-21T00:27:26,486 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4., pid=6, masterSystemTime=1732148846471 2024-11-21T00:27:26,487 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:26,487 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 
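Editorial note: the CreateTableProcedure traced above (CREATE_TABLE_PRE_OPERATION through the ASSIGN of region b1acf010e5e0a45cfda5ab304dae89d4) is the server side of an ordinary Admin.createTable call. The test itself drives this through TestMasterReplication's utilities; the snippet below is only a minimal client-side sketch of the equivalent public API, assuming an already-configured HBase 3.x Connection, with the three families mirroring the descriptors logged by HMaster$4(2454): f and f1 at REPLICATION_SCOPE 1, norep at scope 0.

```java
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of the client call that produces the CreateTableProcedure seen above.
public class CreateTestTableSketch {
  public static void createTestTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"));
      // 'f' and 'f1' are replicated (REPLICATION_SCOPE => '1'); 'norep' is not.
      td.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
          .setMaxVersions(1).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
      td.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
          .setMaxVersions(1).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build());
      td.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
          .setMaxVersions(1).setScope(HConstants.REPLICATION_SCOPE_LOCAL).build());
      admin.createTable(td.build()); // blocks until the master procedure completes
    }
  }
}
```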
2024-11-21T00:27:26,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b1acf010e5e0a45cfda5ab304dae89d4, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:26,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b1acf010e5e0a45cfda5ab304dae89d4, server=5ed4808ef0e6,33687,1732148839767 because future has completed 2024-11-21T00:27:26,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:27:26,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b1acf010e5e0a45cfda5ab304dae89d4, server=5ed4808ef0e6,33687,1732148839767 in 172 msec 2024-11-21T00:27:26,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:27:26,494 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=b1acf010e5e0a45cfda5ab304dae89d4, ASSIGN in 327 msec 2024-11-21T00:27:26,495 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:26,495 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148846495"}]},"ts":"1732148846495"} 2024-11-21T00:27:26,496 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:27:26,497 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:26,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 1.1570 sec 2024-11-21T00:27:26,783 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:27:27,396 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:27:27,401 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:27:27,489 INFO [RPCClient-NioEventLoopGroup-4-2 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:27:27,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@379d426c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:27,497 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42819,-1 for getting cluster id 2024-11-21T00:27:27,497 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:27,498 DEBUG [HMaster-EventLoopGroup-31-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7aafd06b-565f-4e2c-a825-a8d6ed1445cb' 2024-11-21T00:27:27,498 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:27,499 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7aafd06b-565f-4e2c-a825-a8d6ed1445cb" 2024-11-21T00:27:27,499 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@346fc892, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:27,499 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42819,-1] 2024-11-21T00:27:27,499 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:27,500 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:27,501 INFO [HMaster-EventLoopGroup-31-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53696, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:27,502 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bce58a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:27,502 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:27,505 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:27,505 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7accb082 2024-11-21T00:27:27,505 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-11-21T00:27:27,506 INFO [HMaster-EventLoopGroup-31-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53704, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:27,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:41951,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:27:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:27:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:27,512 DEBUG [PEWorker-1 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:41951' 2024-11-21T00:27:27,521 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2450480b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:27,521 DEBUG [PEWorker-1 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,41951,-1 for getting cluster id 2024-11-21T00:27:27,521 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:27,522 DEBUG [HMaster-EventLoopGroup-33-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fa60625d-922e-41d1-b9e8-6909057e9209' 2024-11-21T00:27:27,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:27,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fa60625d-922e-41d1-b9e8-6909057e9209" 2024-11-21T00:27:27,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32314f9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:27,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,41951,-1] 2024-11-21T00:27:27,523 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:27,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:27,525 INFO [HMaster-EventLoopGroup-33-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50308, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:27,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22eef3e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:27,526 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:27,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:27,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@25b338d7 2024-11-21T00:27:27,527 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:27,528 INFO [HMaster-EventLoopGroup-33-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50314, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:27,529 INFO [PEWorker-1 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-1. 2024-11-21T00:27:27,530 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475)
    at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172)
    at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118)
    at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188)
    at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45)
    at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188)
    at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216)
    at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181)
2024-11-21T00:27:27,530 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:27,530 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:27,531 INFO [PEWorker-1 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1
=> '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:27,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,532 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:27,532 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,533 DEBUG [PEWorker-1 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:27:27,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,534 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:27,534 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:27,536 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:27,558 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:27:27,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:27,583 DEBUG [PEWorker-1 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:27:27,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:27,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:27,978 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0878e017eb460e018013f072af40b5c7, NAME => 'hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b 2024-11-21T00:27:28,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:27:28,016 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:28,017 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 0878e017eb460e018013f072af40b5c7, disabling compactions & flushes 2024-11-21T00:27:28,017 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 
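Editorial note: the AddPeerProcedure stored as pid=7 above (peer id=1, clusterKey=hbase+rpc://5ed4808ef0e6:41951, replicateAllUserTables=true, state=ENABLED) corresponds to an Admin.addReplicationPeer call against the source cluster; in this run the first peer addition also triggers creation of the hbase:replication queue-storage table assembled in the surrounding entries. Below is a hedged client-side sketch, assuming the HBase 3.x URI-style cluster key shown in this log (older releases use the ZooKeeper-quorum form instead); it is an illustration of the equivalent public API, not the test's actual code.

```java
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

// Sketch of the client call behind the AddPeerProcedure logged above.
public class AddPeerSketch {
  public static void addPeer(Connection sourceConn) throws Exception {
    try (Admin admin = sourceConn.getAdmin()) {
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          // Cluster key copied from the log; in a real deployment this points at
          // the peer cluster's connection registry endpoint.
          .setClusterKey("hbase+rpc://5ed4808ef0e6:41951")
          .setReplicateAllUserTables(true)
          .build();
      admin.addReplicationPeer("1", peerConfig); // peer id "1", enabled by default
    }
  }
}
```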
2024-11-21T00:27:28,017 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:28,017 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. after waiting 0 ms 2024-11-21T00:27:28,017 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:28,017 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:28,017 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0878e017eb460e018013f072af40b5c7: Waiting for close lock at 1732148848017Disabling compacts and flushes for region at 1732148848017Disabling writes for close at 1732148848017Writing region close event to WAL at 1732148848017Closed at 1732148848017 2024-11-21T00:27:28,018 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:28,018 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148848018"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148848018"}]},"ts":"1732148848018"} 2024-11-21T00:27:28,021 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-21T00:27:28,022 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:28,022 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148848022"}]},"ts":"1732148848022"} 2024-11-21T00:27:28,027 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:27:28,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=0878e017eb460e018013f072af40b5c7, ASSIGN}] 2024-11-21T00:27:28,028 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=0878e017eb460e018013f072af40b5c7, ASSIGN 2024-11-21T00:27:28,029 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=0878e017eb460e018013f072af40b5c7, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,33853,1732148826420; forceNewPlan=false, retain=false 2024-11-21T00:27:28,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:28,180 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=0878e017eb460e018013f072af40b5c7, regionState=OPENING, regionLocation=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:28,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=0878e017eb460e018013f072af40b5c7, ASSIGN because future has completed 2024-11-21T00:27:28,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0878e017eb460e018013f072af40b5c7, server=5ed4808ef0e6,33853,1732148826420}] 2024-11-21T00:27:28,365 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 
2024-11-21T00:27:28,365 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:28,365 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:27:28,377 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33853%2C1732148826420.rep, suffix=, logDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420, archiveDir=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/oldWALs, maxLogs=10 2024-11-21T00:27:28,408 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.rep.1732148848378, exclude list is [], retry=0 2024-11-21T00:27:28,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33273,DS-7ff55e78-ba20-4558-ada4-8cb39555b95f,DISK] 2024-11-21T00:27:28,420 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.rep.1732148848378 2024-11-21T00:27:28,430 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42655:42655)] 2024-11-21T00:27:28,430 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 0878e017eb460e018013f072af40b5c7, NAME => 'hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:28,431 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:28,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:28,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 
service=MultiRowMutationService 2024-11-21T00:27:28,431 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:27:28,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:28,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,431 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,436 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,437 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0878e017eb460e018013f072af40b5c7 columnFamilyName hfileref 2024-11-21T00:27:28,437 DEBUG [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:28,437 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] regionserver.HStore(327): Store=0878e017eb460e018013f072af40b5c7/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:28,438 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,439 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 
{}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0878e017eb460e018013f072af40b5c7 columnFamilyName queue 2024-11-21T00:27:28,439 DEBUG [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:28,440 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] regionserver.HStore(327): Store=0878e017eb460e018013f072af40b5c7/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:28,440 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,441 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0878e017eb460e018013f072af40b5c7 columnFamilyName sid 2024-11-21T00:27:28,441 DEBUG [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:28,442 INFO [StoreOpener-0878e017eb460e018013f072af40b5c7-1 {}] regionserver.HStore(327): Store=0878e017eb460e018013f072af40b5c7/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:28,442 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,443 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/replication/0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,443 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/replication/0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,445 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,445 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,446 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:27:28,447 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,466 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/replication/0878e017eb460e018013f072af40b5c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:28,466 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 0878e017eb460e018013f072af40b5c7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61350489, jitterRate=-0.0858064740896225}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:27:28,466 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:28,466 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 0878e017eb460e018013f072af40b5c7: Running coprocessor pre-open hook at 1732148848432Writing region info on filesystem at 1732148848432Initializing all the Stores at 1732148848435 (+3 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148848435Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148848435Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148848435Cleaning up temporary data from old regions at 1732148848445 (+10 ms)Running coprocessor post-open hooks at 1732148848466 (+21 ms)Region opened successfully at 1732148848466 2024-11-21T00:27:28,467 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7., pid=10, masterSystemTime=1732148848340 2024-11-21T00:27:28,473 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:28,473 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:28,477 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=0878e017eb460e018013f072af40b5c7, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:28,482 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42819 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=5ed4808ef0e6,33853,1732148826420, table=hbase:replication, region=0878e017eb460e018013f072af40b5c7. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-21T00:27:28,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0878e017eb460e018013f072af40b5c7, server=5ed4808ef0e6,33853,1732148826420 because future has completed 2024-11-21T00:27:28,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:27:28,514 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 0878e017eb460e018013f072af40b5c7, server=5ed4808ef0e6,33853,1732148826420 in 319 msec 2024-11-21T00:27:28,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:27:28,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=0878e017eb460e018013f072af40b5c7, ASSIGN in 487 msec 2024-11-21T00:27:28,519 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:28,520 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148848519"}]},"ts":"1732148848519"} 2024-11-21T00:27:28,522 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:27:28,523 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:28,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 992 msec 2024-11-21T00:27:28,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7., hostname=5ed4808ef0e6,33853,1732148826420, seqNum=2] 2024-11-21T00:27:28,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:28,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:28,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:27:28,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:28,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33853 {}] regionserver.RSRpcServices(3929): Executing remote procedure class 
org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:27:28,761 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:27:28,829 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,33853,1732148826420, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:27:28,831 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:28,831 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33853,1732148826420, seqNum=-1] 2024-11-21T00:27:28,832 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:28,833 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49385, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.14 (auth:SIMPLE), service=ClientService 2024-11-21T00:27:28,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,33853,1732148826420', locateType=CURRENT is [region=hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7., hostname=5ed4808ef0e6,33853,1732148826420, seqNum=2] 2024-11-21T00:27:28,849 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:27:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:27:28,853 INFO [PEWorker-4 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,33853,1732148826420 suceeded 2024-11-21T00:27:28,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:27:28,857 INFO [PEWorker-5 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:41951,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:27:28,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 253 msec 2024-11-21T00:27:28,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.3500 sec 2024-11-21T00:27:28,876 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:41951' 2024-11-21T00:27:28,878 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@404b408c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:28,879 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,41951,-1 for getting cluster id 2024-11-21T00:27:28,879 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:28,879 DEBUG [HMaster-EventLoopGroup-33-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fa60625d-922e-41d1-b9e8-6909057e9209' 2024-11-21T00:27:28,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:28,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fa60625d-922e-41d1-b9e8-6909057e9209" 2024-11-21T00:27:28,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@6cbb58d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:28,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,41951,-1] 2024-11-21T00:27:28,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:28,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:28,883 INFO [HMaster-EventLoopGroup-33-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50326, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.14 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:28,884 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@7fe1adc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:28,884 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:28,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:28,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@58fdcd09 2024-11-21T00:27:28,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-11-21T00:27:28,887 INFO [HMaster-EventLoopGroup-33-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50334, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.14 (auth:SIMPLE), service=MasterService 2024-11-21T00:27:28,888 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,33853,1732148826420 (queues=1) is replicating from cluster=7aafd06b-565f-4e2c-a825-a8d6ed1445cb to cluster=fa60625d-922e-41d1-b9e8-6909057e9209 2024-11-21T00:27:28,888 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C33853%2C1732148826420 2024-11-21T00:27:28,888 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,33853,1732148826420, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:27:28,897 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, startPosition=0, beingWritten=true 2024-11-21T00:27:28,904 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C33853%2C1732148826420 2024-11-21T00:27:28,930 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:28,930 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:28,930 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, 
lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:29,169 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:29,511 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:29,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42819 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:29,661 INFO [RPCClient-NioEventLoopGroup-4-5 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:27:29,661 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:27:29,662 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:254) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:29,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:29,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:29,662 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:29,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23cd5751, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:29,681 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,41951,-1 for getting cluster id 2024-11-21T00:27:29,681 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:29,684 DEBUG [HMaster-EventLoopGroup-33-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fa60625d-922e-41d1-b9e8-6909057e9209' 2024-11-21T00:27:29,684 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:29,684 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fa60625d-922e-41d1-b9e8-6909057e9209" 2024-11-21T00:27:29,685 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@486b1c85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:29,685 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,41951,-1] 2024-11-21T00:27:29,685 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:29,685 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:29,686 INFO [HMaster-EventLoopGroup-33-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50344, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:29,688 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bf32462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:29,688 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:29,689 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:29,689 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1c49ea96 2024-11-21T00:27:29,689 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:29,691 INFO [HMaster-EventLoopGroup-33-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:29,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:46151,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:27:29,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:27:29,694 DEBUG [PEWorker-5 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:46151' 2024-11-21T00:27:29,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:29,705 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@236e7f3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:29,706 DEBUG [PEWorker-5 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:29,706 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:29,707 DEBUG [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:29,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:29,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:29,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21c460b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:29,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,46151,-1] 
2024-11-21T00:27:29,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:29,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:29,708 INFO [HMaster-EventLoopGroup-35-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35916, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:29,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a02793d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:29,709 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:29,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:29,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@206eae89 2024-11-21T00:27:29,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:29,710 INFO [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35928, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:29,711 INFO [PEWorker-5 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-5. 
2024-11-21T00:27:29,711 DEBUG [PEWorker-5 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:27:29,711 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:29,711 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:29,711 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:27:29,712 INFO [PEWorker-5 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:29,713 DEBUG [PEWorker-5 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:27:29,714 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:29,714 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:29,715 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:29,764 DEBUG [PEWorker-5 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:27:29,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:27:29,793 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7dc046db6be22eef18a273b77c92911e, NAME => 'hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea 2024-11-21T00:27:29,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:29,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:27:29,838 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:29,838 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 7dc046db6be22eef18a273b77c92911e, disabling compactions & flushes 2024-11-21T00:27:29,838 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:29,838 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:29,838 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. after waiting 0 ms 2024-11-21T00:27:29,838 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:29,838 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:29,839 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7dc046db6be22eef18a273b77c92911e: Waiting for close lock at 1732148849838Disabling compacts and flushes for region at 1732148849838Disabling writes for close at 1732148849838Writing region close event to WAL at 1732148849838Closed at 1732148849838 2024-11-21T00:27:29,840 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:29,840 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148849840"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148849840"}]},"ts":"1732148849840"} 2024-11-21T00:27:29,842 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-21T00:27:29,843 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:29,844 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148849843"}]},"ts":"1732148849843"} 2024-11-21T00:27:29,846 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:27:29,846 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=7dc046db6be22eef18a273b77c92911e, ASSIGN}] 2024-11-21T00:27:29,848 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=7dc046db6be22eef18a273b77c92911e, ASSIGN 2024-11-21T00:27:29,849 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=7dc046db6be22eef18a273b77c92911e, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,44369,1732148833096; forceNewPlan=false, retain=false 2024-11-21T00:27:29,926 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:29,940 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:30,000 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7dc046db6be22eef18a273b77c92911e, regionState=OPENING, regionLocation=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:30,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:30,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=7dc046db6be22eef18a273b77c92911e, ASSIGN because future has completed 2024-11-21T00:27:30,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7dc046db6be22eef18a273b77c92911e, server=5ed4808ef0e6,44369,1732148833096}] 
2024-11-21T00:27:30,181 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:30,181 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:30,181 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:27:30,183 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C44369%2C1732148833096.rep, suffix=, logDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096, archiveDir=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/oldWALs, maxLogs=10 2024-11-21T00:27:30,203 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.rep.1732148850183, exclude list is [], retry=0 2024-11-21T00:27:30,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37973,DS-d99e9ffc-aee0-4df8-97a6-2e07959051f5,DISK] 2024-11-21T00:27:30,240 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.rep.1732148850183 2024-11-21T00:27:30,249 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33105:33105)] 2024-11-21T00:27:30,249 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 7dc046db6be22eef18a273b77c92911e, NAME => 'hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:30,249 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
2024-11-21T00:27:30,250 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:30,250 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. service=MultiRowMutationService 2024-11-21T00:27:30,250 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:27:30,250 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,250 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:30,250 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,250 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,272 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,274 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dc046db6be22eef18a273b77c92911e columnFamilyName hfileref 2024-11-21T00:27:30,274 DEBUG [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:30,274 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] regionserver.HStore(327): Store=7dc046db6be22eef18a273b77c92911e/hfileref, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:30,274 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,276 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dc046db6be22eef18a273b77c92911e columnFamilyName queue 2024-11-21T00:27:30,276 DEBUG [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:30,276 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] regionserver.HStore(327): Store=7dc046db6be22eef18a273b77c92911e/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:30,276 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,277 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dc046db6be22eef18a273b77c92911e columnFamilyName sid 2024-11-21T00:27:30,277 DEBUG [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:30,277 INFO [StoreOpener-7dc046db6be22eef18a273b77c92911e-1 {}] regionserver.HStore(327): Store=7dc046db6be22eef18a273b77c92911e/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:30,278 
DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,281 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,292 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,293 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,294 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:27:30,298 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,317 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:30,318 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 7dc046db6be22eef18a273b77c92911e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70973854, jitterRate=0.05759283900260925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:27:30,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:30,318 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:30,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 7dc046db6be22eef18a273b77c92911e: Running coprocessor pre-open hook at 1732148850250Writing region info on filesystem at 1732148850250Initializing all the Stores at 1732148850251 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148850251Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148850272 (+21 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148850272Cleaning up temporary data from old regions at 1732148850293 (+21 ms)Running coprocessor post-open hooks at 1732148850318 (+25 ms)Region opened successfully at 1732148850318 2024-11-21T00:27:30,320 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e., pid=10, masterSystemTime=1732148850173 2024-11-21T00:27:30,326 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:30,326 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 
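The FlushLargeStoresPolicy entry a few lines above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:replication descriptor, so the flush lower bound falls back to the memstore flush size divided by the number of families (42.7 MB here). A minimal sketch of setting that per-table key explicitly, assuming only the key name quoted in the log (the 16 MB value and the helper class are illustrative, not taken from the test):

    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushLowerBoundSketch {
      // Returns a copy of the given descriptor with an explicit per-column-family
      // flush lower bound, so FlushLargeStoresPolicy would use it directly.
      static TableDescriptor withLowerBound(TableDescriptor current) {
        return TableDescriptorBuilder.newBuilder(current)
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024)) // 16 MB, example only
            .build();
      }
    }

Applying such a change would go through Admin.modifyTable on the rebuilt descriptor; the system table in this test is left at the default, which is why the fallback message appears.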
2024-11-21T00:27:30,329 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7dc046db6be22eef18a273b77c92911e, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:30,337 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=10, ppid=9, state=RUNNABLE, hasLock=true; OpenRegionProcedure 7dc046db6be22eef18a273b77c92911e, server=5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:30,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:27:30,356 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:27:30,356 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=7dc046db6be22eef18a273b77c92911e, ASSIGN in 501 msec 2024-11-21T00:27:30,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 7dc046db6be22eef18a273b77c92911e, server=5ed4808ef0e6,44369,1732148833096 in 326 msec 2024-11-21T00:27:30,359 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:30,360 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148850359"}]},"ts":"1732148850359"} 2024-11-21T00:27:30,363 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:27:30,364 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:30,368 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 652 msec 2024-11-21T00:27:30,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e., hostname=5ed4808ef0e6,44369,1732148833096, seqNum=2] 2024-11-21T00:27:30,453 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:30,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:30,476 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, 
baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:30,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:27:30,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44369 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:27:30,784 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:27:30,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:30,850 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,44369,1732148833096, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:27:30,851 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:30,851 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44369,1732148833096, seqNum=-1] 2024-11-21T00:27:30,851 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:30,852 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-34-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43707, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.15 (auth:SIMPLE), service=ClientService 2024-11-21T00:27:30,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-34-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,44369,1732148833096', locateType=CURRENT is [region=hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e., hostname=5ed4808ef0e6,44369,1732148833096, seqNum=2] 2024-11-21T00:27:30,870 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:27:30,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:27:30,881 INFO [PEWorker-2 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,44369,1732148833096 suceeded 2024-11-21T00:27:30,884 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:27:30,884 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 274 msec 2024-11-21T00:27:30,884 INFO [PEWorker-2 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config 
clusterKey=hbase+rpc://5ed4808ef0e6:46151,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:27:30,886 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.1930 sec 2024-11-21T00:27:30,904 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:46151' 2024-11-21T00:27:30,905 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@63e25324, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:30,905 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:30,905 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:30,906 DEBUG [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:30,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:30,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:30,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@4bb34a2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:30,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,46151,-1] 2024-11-21T00:27:30,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:30,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:30,908 INFO [HMaster-EventLoopGroup-35-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35956, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.15 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:30,909 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@55448640, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:30,909 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:30,910 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:30,910 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3f555657 2024-11-21T00:27:30,910 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:30,911 INFO [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35966, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.15 (auth:SIMPLE), service=MasterService 2024-11-21T00:27:30,919 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,44369,1732148833096 (queues=1) is replicating from cluster=fa60625d-922e-41d1-b9e8-6909057e9209 to cluster=46c96938-ca9c-4723-82fe-8a99d3a89209 2024-11-21T00:27:30,919 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C44369%2C1732148833096 2024-11-21T00:27:30,919 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,44369,1732148833096, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:27:30,929 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, startPosition=0, beingWritten=true 2024-11-21T00:27:30,936 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C44369%2C1732148833096 2024-11-21T00:27:30,979 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:30,979 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] 
regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:30,979 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:31,082 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:31,209 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:31,544 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:31,802 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:31,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41951 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:31,848 INFO [RPCClient-NioEventLoopGroup-4-8 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:27:31,848 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:27:31,848 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:255) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:31,848 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:31,851 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
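The call stack above is the test's addPeer step completing (ADD_REPLICATION_PEER for peerId 1), and AddPeerProcedure earlier logged the resulting config as clusterKey=hbase+rpc://5ed4808ef0e6:46151, replicateAllUserTables=true, bandwidth=0, serial=false. A hedged sketch of what that amounts to at the public Admin API level; the peer id and cluster key are copied from the log, while the class name and connection setup are illustrative and not the test's actual code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public final class AddPeerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Peer config mirroring what AddPeerProcedure logged for peer "1".
          ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
              .setClusterKey("hbase+rpc://5ed4808ef0e6:46151")
              .setReplicateAllUserTables(true)
              .setBandwidth(0)
              .setSerial(false)
              .build();
          admin.addReplicationPeer("1", peer, true); // true => peer starts ENABLED
        }
      }
    }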
2024-11-21T00:27:31,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:31,874 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68fef6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:31,875 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:31,878 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:31,881 DEBUG [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:31,881 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:31,882 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:31,884 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72fb364c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:31,885 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,46151,-1] 2024-11-21T00:27:31,886 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:31,886 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:31,888 INFO [HMaster-EventLoopGroup-35-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51808, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:31,890 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e9f4f91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:31,890 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:31,892 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:31,892 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@79a18b66 2024-11-21T00:27:31,892 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:31,893 INFO [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51814, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:31,893 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:42819,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:27:31,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:27:31,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:31,899 DEBUG [PEWorker-1 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:42819' 2024-11-21T00:27:31,900 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@268333fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:31,900 DEBUG [PEWorker-1 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42819,-1 for getting cluster id 2024-11-21T00:27:31,901 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:31,902 DEBUG [HMaster-EventLoopGroup-31-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7aafd06b-565f-4e2c-a825-a8d6ed1445cb' 2024-11-21T00:27:31,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:31,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7aafd06b-565f-4e2c-a825-a8d6ed1445cb" 2024-11-21T00:27:31,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@553cf8b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:31,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42819,-1] 2024-11-21T00:27:31,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:31,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:31,904 INFO [HMaster-EventLoopGroup-31-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60886, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:31,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ae4735, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:31,906 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start 
fetching master stub from registry 2024-11-21T00:27:31,907 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:31,907 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@31ed2557 2024-11-21T00:27:31,908 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:31,909 INFO [HMaster-EventLoopGroup-31-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60894, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:31,911 INFO [PEWorker-1 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-1. 2024-11-21T00:27:31,911 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:27:31,911 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:31,911 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:31,911 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
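Before storing the second peer, AddPeerProcedure's checkClusterKey (see the PEWorker-1 stack above) opens a short-lived connection to the target cluster through the RPC connection registry ("connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:42819'"). A sketch of pointing an ordinary client at a cluster the same way; the registry class and the hbase.client.registry.impl / hbase.client.bootstrap.servers keys are my assumption of the standard names for this 3.0.0-beta line and should be checked against the version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class RpcRegistrySketch {
      // Resolve the cluster via master/region-server RPC endpoints rather than
      // ZooKeeper, mirroring the "rpc bootstrap servers" entries in the log.
      public static Connection connect() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        conf.set("hbase.client.bootstrap.servers", "5ed4808ef0e6:42819");
        return ConnectionFactory.createConnection(conf);
      }
    }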
2024-11-21T00:27:31,913 INFO [PEWorker-1 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:31,914 DEBUG [PEWorker-1 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:27:31,916 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:27:31,917 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:31,918 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:27:31,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:27:31,934 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8c770a4c46c01e4bd8e16042c47ed8e3, NAME => 'hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e 2024-11-21T00:27:31,965 DEBUG [PEWorker-1 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:27:31,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:27:31,990 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:32,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:32,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:32,371 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:32,371 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 8c770a4c46c01e4bd8e16042c47ed8e3, disabling compactions & flushes 2024-11-21T00:27:32,371 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:32,371 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:32,371 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. after waiting 0 ms 2024-11-21T00:27:32,371 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:32,371 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 
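The CreateTableProcedure above (pid=8 on the second cluster) builds hbase:replication with the MultiRowMutationEndpoint coprocessor, a DelimitedKeyPrefix split restriction on '-', and the hfileref/queue/sid families at VERSIONS => '1'. A rough client-side equivalent of that descriptor for reference; the table is actually created internally by the master, so the helper class below is purely illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ReplicationTableDescriptorSketch {
      static TableDescriptor build() throws IOException {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("hbase", "replication"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .setValue("hbase.regionserver.region.split_restriction.type", "DelimitedKeyPrefix")
            .setValue("hbase.regionserver.region.split_restriction.delimiter", "-");
        for (String family : new String[] { "hfileref", "queue", "sid" }) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
              .build();
          builder.setColumnFamily(cf);
        }
        return builder.build();
      }
    }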
2024-11-21T00:27:32,371 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8c770a4c46c01e4bd8e16042c47ed8e3: Waiting for close lock at 1732148852371Disabling compacts and flushes for region at 1732148852371Disabling writes for close at 1732148852371Writing region close event to WAL at 1732148852371Closed at 1732148852371 2024-11-21T00:27:32,372 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:27:32,373 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148852372"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148852372"}]},"ts":"1732148852372"} 2024-11-21T00:27:32,375 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:27:32,376 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:27:32,377 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148852376"}]},"ts":"1732148852376"} 2024-11-21T00:27:32,379 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:27:32,380 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=8c770a4c46c01e4bd8e16042c47ed8e3, ASSIGN}] 2024-11-21T00:27:32,381 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=8c770a4c46c01e4bd8e16042c47ed8e3, ASSIGN 2024-11-21T00:27:32,382 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=8c770a4c46c01e4bd8e16042c47ed8e3, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,33687,1732148839767; forceNewPlan=false, retain=false 2024-11-21T00:27:32,502 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:32,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:32,535 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=8c770a4c46c01e4bd8e16042c47ed8e3, regionState=OPENING, regionLocation=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:32,540 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=8c770a4c46c01e4bd8e16042c47ed8e3, ASSIGN because future has completed 2024-11-21T00:27:32,548 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c770a4c46c01e4bd8e16042c47ed8e3, server=5ed4808ef0e6,33687,1732148839767}] 2024-11-21T00:27:32,643 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:32,709 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:32,709 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:32,709 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:27:32,711 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C33687%2C1732148839767.rep, suffix=, logDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767, archiveDir=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/oldWALs, maxLogs=10 2024-11-21T00:27:32,733 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.rep.1732148852711, exclude list is [], retry=0 2024-11-21T00:27:32,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38013,DS-1b285dae-a468-4813-ae49-fa9fd5915b2b,DISK] 2024-11-21T00:27:32,765 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.rep.1732148852711 2024-11-21T00:27:32,775 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43027:43027)] 2024-11-21T00:27:32,775 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 8c770a4c46c01e4bd8e16042c47ed8e3, NAME => 'hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:32,775 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:32,775 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:32,776 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. service=MultiRowMutationService 2024-11-21T00:27:32,776 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:27:32,776 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,776 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:32,776 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,776 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,781 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,789 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c770a4c46c01e4bd8e16042c47ed8e3 columnFamilyName hfileref 2024-11-21T00:27:32,789 DEBUG [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:32,789 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] regionserver.HStore(327): Store=8c770a4c46c01e4bd8e16042c47ed8e3/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:32,789 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,792 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c770a4c46c01e4bd8e16042c47ed8e3 columnFamilyName queue 2024-11-21T00:27:32,792 DEBUG [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:32,792 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] regionserver.HStore(327): Store=8c770a4c46c01e4bd8e16042c47ed8e3/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:32,792 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,794 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c770a4c46c01e4bd8e16042c47ed8e3 columnFamilyName 
sid 2024-11-21T00:27:32,794 DEBUG [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:32,795 INFO [StoreOpener-8c770a4c46c01e4bd8e16042c47ed8e3-1 {}] regionserver.HStore(327): Store=8c770a4c46c01e4bd8e16042c47ed8e3/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:32,795 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/replication/8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/replication/8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,797 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,797 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,798 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
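The FlushLargeStoresPolicy entry just above falls back to region.getMemStoreFlushHeapSize divided by the number of families because no hbase.hregion.percolumnfamilyflush.size.lower.bound is present in the hbase:replication table descriptor. A minimal sketch of setting that bound explicitly on a table descriptor, assuming the HBase 2.x/3.x builder API; the property name is copied from the log and the 16 MB value is only an example:

```java
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch only: pin the per-column-family flush lower bound on the table descriptor,
// which is where the FlushLargeStoresPolicy message above says it looked first.
final class FlushLowerBoundSketch {
  static TableDescriptor withFlushLowerBound(TableDescriptor existing) {
    return TableDescriptorBuilder.newBuilder(existing)
        // property name copied verbatim from the log; the 16 MB value is illustrative
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
  }
}
```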
2024-11-21T00:27:32,801 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,808 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/replication/8c770a4c46c01e4bd8e16042c47ed8e3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:32,809 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 8c770a4c46c01e4bd8e16042c47ed8e3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70846046, jitterRate=0.05568835139274597}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:27:32,809 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:32,809 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 8c770a4c46c01e4bd8e16042c47ed8e3: Running coprocessor pre-open hook at 1732148852776Writing region info on filesystem at 1732148852776Initializing all the Stores at 1732148852777 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148852778 (+1 ms)Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148852780 (+2 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148852780Cleaning up temporary data from old regions at 1732148852797 (+17 ms)Running coprocessor post-open hooks at 1732148852809 (+12 ms)Region opened successfully at 1732148852809 2024-11-21T00:27:32,816 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3., pid=10, masterSystemTime=1732148852704 2024-11-21T00:27:32,818 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 
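The region open journal above enumerates the three column families of hbase:replication (hfileref, queue, sid), each keeping a single version. A sketch of an equivalent table declaration with the TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; only the table name, family names and the VERSIONS setting come from the log, everything else is left at assumed defaults:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: mirrors the families reported in the region open journal above.
final class ReplicationTableDescriptorSketch {
  static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "replication"));
    for (String family : new String[] { "hfileref", "queue", "sid" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
              .build());
    }
    return builder.build();
  }
}
```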
2024-11-21T00:27:32,818 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:32,819 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=8c770a4c46c01e4bd8e16042c47ed8e3, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:32,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c770a4c46c01e4bd8e16042c47ed8e3, server=5ed4808ef0e6,33687,1732148839767 because future has completed 2024-11-21T00:27:32,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:27:32,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 8c770a4c46c01e4bd8e16042c47ed8e3, server=5ed4808ef0e6,33687,1732148839767 in 299 msec 2024-11-21T00:27:32,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:27:32,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=8c770a4c46c01e4bd8e16042c47ed8e3, ASSIGN in 471 msec 2024-11-21T00:27:32,861 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:27:32,861 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148852861"}]},"ts":"1732148852861"} 2024-11-21T00:27:32,866 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:27:32,868 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:27:32,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 955 msec 2024-11-21T00:27:32,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3., hostname=5ed4808ef0e6,33687,1732148839767, seqNum=2] 2024-11-21T00:27:33,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:33,114 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset 
compression=false 2024-11-21T00:27:33,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:33,375 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:33,574 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:33,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:27:33,826 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:33,934 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:27:33,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:33,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33687 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:27:34,008 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:27:34,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:34,096 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,097 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,097 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,102 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,119 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,123 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,33687,1732148839767, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:27:34,124 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:34,124 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33687,1732148839767, seqNum=-1] 2024-11-21T00:27:34,135 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:34,136 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:34,140 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55429, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=ClientService 2024-11-21T00:27:34,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,33687,1732148839767', locateType=CURRENT is [region=hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3., hostname=5ed4808ef0e6,33687,1732148839767, seqNum=2] 2024-11-21T00:27:34,227 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:27:34,228 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:27:34,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:27:34,239 INFO [PEWorker-5 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,33687,1732148839767 suceeded 2024-11-21T00:27:34,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:27:34,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 435 msec 2024-11-21T00:27:34,242 INFO [PEWorker-5 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config 
clusterKey=hbase+rpc://5ed4808ef0e6:42819,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:27:34,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 2.3490 sec 2024-11-21T00:27:34,301 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:42819' 2024-11-21T00:27:34,308 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1fb1c1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:34,309 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42819,-1 for getting cluster id 2024-11-21T00:27:34,309 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:34,310 DEBUG [HMaster-EventLoopGroup-31-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7aafd06b-565f-4e2c-a825-a8d6ed1445cb' 2024-11-21T00:27:34,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:34,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7aafd06b-565f-4e2c-a825-a8d6ed1445cb" 2024-11-21T00:27:34,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3a2a705d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:34,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42819,-1] 2024-11-21T00:27:34,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:34,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:34,313 INFO [HMaster-EventLoopGroup-31-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60906, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:34,320 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@47ed93c2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:34,321 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:34,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:34,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@62d821ac 2024-11-21T00:27:34,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:34,329 INFO [HMaster-EventLoopGroup-31-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60920, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=MasterService 2024-11-21T00:27:34,333 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,33687,1732148839767 (queues=1) is replicating from cluster=46c96938-ca9c-4723-82fe-8a99d3a89209 to cluster=7aafd06b-565f-4e2c-a825-a8d6ed1445cb 2024-11-21T00:27:34,333 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C33687%2C1732148839767 2024-11-21T00:27:34,333 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,33687,1732148839767, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:27:34,334 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, startPosition=0, beingWritten=true 2024-11-21T00:27:34,337 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C33687%2C1732148839767 2024-11-21T00:27:34,363 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:34,363 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:34,363 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:34,574 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:34,602 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:34,653 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:34,908 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:35,344 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:35,464 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, 
See HBASE-27595 for details. 2024-11-21T00:27:35,605 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:35,754 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:35,882 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:36,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:27:36,069 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:27:36,069 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
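AddPeerProcedure finishes here with peer 1 ENABLED and the configuration echoed earlier in the log (clusterKey=hbase+rpc://5ed4808ef0e6:42819, replicateAllUserTables=true, bandwidth=0, serial=false). A hedged sketch of the corresponding client-side call through the public Admin API; the Connection parameter is a placeholder, and this is not a transcript of the test's own addPeer helper:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

// Sketch: add an enabled replication peer matching the configuration echoed in the log.
final class AddPeerSketch {
  static void addPeer(Connection connection) throws IOException {
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("hbase+rpc://5ed4808ef0e6:42819") // cluster key as logged
        .setReplicateAllUserTables(true)
        .setBandwidth(0)
        .setSerial(false)
        .build();
    try (Admin admin = connection.getAdmin()) {
      admin.addReplicationPeer("1", peerConfig, true /* enabled */);
    }
  }
}
```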
2024-11-21T00:27:36,069 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:256) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:36,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:36,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:36,070 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:27:36,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d80671, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,079 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42819,-1 for getting cluster id 2024-11-21T00:27:36,079 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:36,081 DEBUG [HMaster-EventLoopGroup-31-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7aafd06b-565f-4e2c-a825-a8d6ed1445cb' 2024-11-21T00:27:36,081 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:36,081 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7aafd06b-565f-4e2c-a825-a8d6ed1445cb" 2024-11-21T00:27:36,081 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@665aea47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,082 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42819,-1] 2024-11-21T00:27:36,082 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:36,082 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:36,084 INFO [HMaster-EventLoopGroup-31-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60938, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:36,085 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27a6d6c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,097 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@185d9296, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,097 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,41951,-1 for getting cluster id 2024-11-21T00:27:36,097 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:36,098 DEBUG [HMaster-EventLoopGroup-33-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fa60625d-922e-41d1-b9e8-6909057e9209' 2024-11-21T00:27:36,098 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:36,098 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] 
client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fa60625d-922e-41d1-b9e8-6909057e9209" 2024-11-21T00:27:36,099 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@514e5ed1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,099 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,41951,-1] 2024-11-21T00:27:36,099 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:36,099 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:36,100 INFO [HMaster-EventLoopGroup-33-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46884, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:36,101 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54b45eff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14102322, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,112 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:36,113 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:36,114 DEBUG [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:36,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:36,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:36,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37775c2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,46151,-1] 2024-11-21T00:27:36,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:36,116 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:36,117 INFO [HMaster-EventLoopGroup-35-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51828, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:36,118 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@451f51c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:36,119 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:36,120 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33853,1732148826420, seqNum=-1] 2024-11-21T00:27:36,120 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:36,124 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56698, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:36,126 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19., hostname=5ed4808ef0e6,33853,1732148826420, seqNum=2] 2024-11-21T00:27:36,130 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:36,131 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33687,1732148839767, seqNum=-1] 2024-11-21T00:27:36,131 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:36,133 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50910, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:36,136 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4., hostname=5ed4808ef0e6,33687,1732148839767, seqNum=2] 2024-11-21T00:27:36,139 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row. 
IsDeleteReplication:false 2024-11-21T00:27:36,521 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:36,617 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:36,978 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 379, reset compression=false 2024-11-21T00:27:37,027 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:37,028 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 480, reset compression=false 2024-11-21T00:27:37,028 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[{test/69eb84528d7bfcc8bfbb1997d42d5d19/4=[#edits: 1 = ],199}], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=480, nbRowKeys=1, nbHFiles=0, heapSize=199, lastSeqIds={}, endOfFile=false,usedBufferSize=199] 2024-11-21T00:27:37,036 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:37,037 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-34-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44594, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins.hfs.14 (auth:SIMPLE), service=AdminService 2024-11-21T00:27:37,038 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:37,056 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@76ca6586, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:37,057 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,41951,-1 for getting cluster id 2024-11-21T00:27:37,057 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:37,058 DEBUG [HMaster-EventLoopGroup-33-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'fa60625d-922e-41d1-b9e8-6909057e9209' 2024-11-21T00:27:37,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:37,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "fa60625d-922e-41d1-b9e8-6909057e9209" 2024-11-21T00:27:37,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@787caa9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:37,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,41951,-1] 2024-11-21T00:27:37,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:37,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:37,064 INFO [HMaster-EventLoopGroup-33-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46904, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.15 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:37,065 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@5c4c198d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:37,066 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:37,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44369,1732148833096, seqNum=-1] 2024-11-21T00:27:37,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-21T00:27:37,069 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-34-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44598, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.15 (auth:SIMPLE), service=ClientService 2024-11-21T00:27:37,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148843191.fa18a0bdbdf880445723239afb964dd9., hostname=5ed4808ef0e6,44369,1732148833096, seqNum=2] 2024-11-21T00:27:37,076 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:37,141 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row. IsDeleteReplication:false 2024-11-21T00:27:37,227 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:37,268 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 480, reset compression=false 2024-11-21T00:27:37,600 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 480, reset compression=false 2024-11-21T00:27:37,743 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 379, reset compression=false 2024-11-21T00:27:37,775 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:37,775 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 503, reset compression=false 2024-11-21T00:27:37,775 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[{test/fa18a0bdbdf880445723239afb964dd9/4=[#edits: 1 = ],215}], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=503, nbRowKeys=1, nbHFiles=0, heapSize=215, lastSeqIds={}, endOfFile=false,usedBufferSize=215] 2024-11-21T00:27:37,777 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:37,780 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50926, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.15 (auth:SIMPLE), service=AdminService 2024-11-21T00:27:37,781 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:37,803 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@66e13d0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:37,803 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:37,803 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:37,804 DEBUG [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:37,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:37,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:37,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@56eb9336, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:37,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: 
[5ed4808ef0e6,46151,-1] 2024-11-21T00:27:37,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:37,805 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:37,805 INFO [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51856, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:37,807 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@69a8933f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:37,808 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:37,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33687,1732148839767, seqNum=-1] 2024-11-21T00:27:37,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:37,811 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50936, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=ClientService 2024-11-21T00:27:37,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4., hostname=5ed4808ef0e6,33687,1732148839767, seqNum=2] 2024-11-21T00:27:37,819 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
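The "Waiting for more time for replication. Row:row" and "Obtained row:row" messages around this point come from a poll-until-visible check: the test writes a row on one cluster and repeatedly reads the sink cluster's 'test' table until the edit has been shipped and applied. A minimal sketch of that pattern using the public HBase client API follows; the configuration object, table name, row key and timeout are illustrative assumptions, not code taken from TestMasterReplication.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WaitForReplicatedRow {
  // Polls the sink cluster until 'row' becomes visible in table 'test', or the timeout expires.
  static boolean waitForRow(Configuration sinkConf, byte[] row, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    try (Connection conn = ConnectionFactory.createConnection(sinkConf);
         Table table = conn.getTable(TableName.valueOf("test"))) {
      while (System.currentTimeMillis() < deadline) {
        Result r = table.get(new Get(row));
        if (!r.isEmpty()) {
          return true;       // the edit was shipped by the source and applied by the ReplicationSink
        }
        Thread.sleep(500);   // replication is asynchronous, so keep polling
      }
    }
    return false;            // analogous to the test giving up after "waiting for more time"
  }

  public static void main(String[] args) throws Exception {
    Configuration sinkConf = HBaseConfiguration.create();   // would point at the sink cluster
    System.out.println(waitForRow(sinkConf, Bytes.toBytes("row"), 30_000));
  }
}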
2024-11-21T00:27:38,015 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 503, reset compression=false 2024-11-21T00:27:38,033 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 379, reset compression=false 2024-11-21T00:27:38,040 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 480, reset compression=false 2024-11-21T00:27:38,080 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:38,080 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 527, reset compression=false 2024-11-21T00:27:38,081 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=527, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:38,144 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row. 
IsDeleteReplication:false 2024-11-21T00:27:38,146 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:38,147 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,44369,1732148833096, seqNum=-1] 2024-11-21T00:27:38,148 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:38,153 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-34-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44610, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:38,161 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row1', locateType=CURRENT is [region=test,,1732148843191.fa18a0bdbdf880445723239afb964dd9., hostname=5ed4808ef0e6,44369,1732148833096, seqNum=2] 2024-11-21T00:27:38,170 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row1. IsDeleteReplication:false 2024-11-21T00:27:38,319 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 527, reset compression=false 2024-11-21T00:27:38,331 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 503, reset compression=false 2024-11-21T00:27:38,342 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:38,342 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 607, reset compression=false 2024-11-21T00:27:38,342 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[{test/fa18a0bdbdf880445723239afb964dd9/5=[#edits: 1 = ],207}], 
lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=607, nbRowKeys=1, nbHFiles=0, heapSize=207, lastSeqIds={}, endOfFile=false,usedBufferSize=207] 2024-11-21T00:27:38,344 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:38,346 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:38,351 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:38,351 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 655, reset compression=false 2024-11-21T00:27:38,351 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[{test/b1acf010e5e0a45cfda5ab304dae89d4/5=[#edits: 1 = ],223}], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=655, nbRowKeys=1, nbHFiles=0, heapSize=223, lastSeqIds={}, endOfFile=false,usedBufferSize=223] 2024-11-21T00:27:38,356 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:38,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56700, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=AdminService 2024-11-21T00:27:38,357 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(298): Started replicating mutations. 
2024-11-21T00:27:38,394 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3c7b873d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:38,394 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42819,-1 for getting cluster id 2024-11-21T00:27:38,394 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:38,395 DEBUG [HMaster-EventLoopGroup-31-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7aafd06b-565f-4e2c-a825-a8d6ed1445cb' 2024-11-21T00:27:38,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:38,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7aafd06b-565f-4e2c-a825-a8d6ed1445cb" 2024-11-21T00:27:38,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@2e999857, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:38,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42819,-1] 2024-11-21T00:27:38,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:38,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:38,397 INFO [HMaster-EventLoopGroup-31-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60958, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.14 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:38,398 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3cf652be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:38,398 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:38,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,33853,1732148826420, seqNum=-1] 2024-11-21T00:27:38,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:38,401 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56710, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.14 (auth:SIMPLE), service=ClientService 2024-11-21T00:27:38,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row1', locateType=CURRENT is [region=test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19., hostname=5ed4808ef0e6,33853,1732148826420, seqNum=2] 2024-11-21T00:27:38,407 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:38,579 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 607, reset compression=false 2024-11-21T00:27:38,585 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 480, reset compression=false 2024-11-21T00:27:38,590 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:38,590 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 631, reset compression=false 2024-11-21T00:27:38,595 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=631, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:38,606 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 655, reset compression=false 
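The "AsyncNonMetaRegionLocator ... The fetched location of 'test', row='row1' ... [region=..., hostname=..., seqNum=...]" entries record the client resolving which region server hosts a given row before issuing the read. A rough equivalent with the synchronous RegionLocator API is sketched below for illustration (the log itself uses the async locator internally; the table name and usage are assumptions).

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateRow {
  // Resolves which region (and therefore which region server) hosts a given row of 'test'.
  static HRegionLocation locate(Connection conn, byte[] row) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("test"))) {
      HRegionLocation loc = locator.getRegionLocation(row);
      // loc.getRegion().getRegionNameAsString() and loc.getHostnamePort() correspond to the
      // "region=..., hostname=..." fields printed by the locator entries above.
      return loc;
    }
  }
}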
2024-11-21T00:27:38,805 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:38,812 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 631, reset compression=false 2024-11-21T00:27:38,883 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 607, reset compression=false 2024-11-21T00:27:38,917 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 655, reset compression=false 2024-11-21T00:27:39,130 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 631, reset compression=false 2024-11-21T00:27:39,174 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row1. IsDeleteReplication:false 2024-11-21T00:27:39,179 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row2. 
IsDeleteReplication:false 2024-11-21T00:27:39,326 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 607, reset compression=false 2024-11-21T00:27:39,339 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 655, reset compression=false 2024-11-21T00:27:39,346 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:39,346 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 759, reset compression=false 2024-11-21T00:27:39,347 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[{test/b1acf010e5e0a45cfda5ab304dae89d4/6=[#edits: 1 = ],207}], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=759, nbRowKeys=1, nbHFiles=0, heapSize=207, lastSeqIds={}, endOfFile=false,usedBufferSize=207] 2024-11-21T00:27:39,349 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:39,356 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
2024-11-21T00:27:39,459 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:27:39,539 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 631, reset compression=false 2024-11-21T00:27:39,566 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:39,566 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 758, reset compression=false 2024-11-21T00:27:39,566 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[{test/69eb84528d7bfcc8bfbb1997d42d5d19/6=[#edits: 1 = ],223}], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=758, nbRowKeys=1, nbHFiles=0, heapSize=223, lastSeqIds={}, endOfFile=false,usedBufferSize=223] 2024-11-21T00:27:39,567 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:39,569 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
2024-11-21T00:27:39,570 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 759, reset compression=false 2024-11-21T00:27:39,802 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 758, reset compression=false 2024-11-21T00:27:39,834 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 607, reset compression=false 2024-11-21T00:27:39,869 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:39,869 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 757, reset compression=false 2024-11-21T00:27:39,869 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=757, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:39,907 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 759, reset compression=false 2024-11-21T00:27:40,112 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 757, reset compression=false 2024-11-21T00:27:40,133 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 758, reset compression=false 2024-11-21T00:27:40,181 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row2. IsDeleteReplication:false 2024-11-21T00:27:40,186 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row. IsDeleteReplication:true 2024-11-21T00:27:40,350 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 759, reset compression=false 2024-11-21T00:27:40,432 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 757, reset compression=false 2024-11-21T00:27:40,569 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 758, reset compression=false 2024-11-21T00:27:40,586 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:40,586 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 914, reset compression=false 
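The "IsDeleteReplication:true" rounds starting here delete a row on the source cluster and then wait for the delete marker to be replicated, i.e. for the row to disappear from the sink as well. A hedged sketch of that phase, again using only the public client API (configurations, table name, row key and timeout are illustrative assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;

public class DeleteReplicationCheck {
  static void deleteAndVerify(Configuration sourceConf, Configuration sinkConf,
                              byte[] row, long timeoutMs) throws Exception {
    // 1. Delete the row on the source cluster; the tombstone goes into the WAL and is later
    //    picked up by the ReplicationSource wal-reader/shipper threads seen in this log.
    try (Connection src = ConnectionFactory.createConnection(sourceConf);
         Table srcTable = src.getTable(TableName.valueOf("test"))) {
      srcTable.delete(new Delete(row));
    }
    // 2. Poll the sink cluster until the row is gone there as well.
    long deadline = System.currentTimeMillis() + timeoutMs;
    try (Connection sink = ConnectionFactory.createConnection(sinkConf);
         Table sinkTable = sink.getTable(TableName.valueOf("test"))) {
      while (System.currentTimeMillis() < deadline) {
        if (sinkTable.get(new Get(row)).isEmpty()) {
          return;                                   // delete marker replicated and applied
        }
        Thread.sleep(500);
      }
    }
    throw new AssertionError("delete not replicated within " + timeoutMs + " ms");
  }
}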
2024-11-21T00:27:40,586 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[{test/69eb84528d7bfcc8bfbb1997d42d5d19/7=[#edits: 2 = ],271}], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=914, nbRowKeys=1, nbHFiles=0, heapSize=271, lastSeqIds={}, endOfFile=false,usedBufferSize=271] 2024-11-21T00:27:40,587 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:40,595 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:40,809 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 914, reset compression=false 2024-11-21T00:27:40,838 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 757, reset compression=false 2024-11-21T00:27:40,855 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:40,855 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 904, reset compression=false 2024-11-21T00:27:40,855 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[{test/fa18a0bdbdf880445723239afb964dd9/7=[#edits: 2 = ],287}], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=904, nbRowKeys=1, nbHFiles=0, 
heapSize=287, lastSeqIds={}, endOfFile=false,usedBufferSize=287] 2024-11-21T00:27:40,856 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:40,858 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 759, reset compression=false 2024-11-21T00:27:40,860 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:40,887 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:40,887 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 930, reset compression=false 2024-11-21T00:27:40,887 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=930, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:41,060 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 904, reset compression=false 2024-11-21T00:27:41,094 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 930, reset compression=false 2024-11-21T00:27:41,114 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 914, reset compression=false 2024-11-21T00:27:41,189 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row. IsDeleteReplication:true 2024-11-21T00:27:41,195 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row1. IsDeleteReplication:true 2024-11-21T00:27:41,370 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 904, reset compression=false 2024-11-21T00:27:41,383 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:41,383 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1063, reset compression=false 2024-11-21T00:27:41,383 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[{test/fa18a0bdbdf880445723239afb964dd9/8=[#edits: 2 = ],279}], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=1063, nbRowKeys=1, nbHFiles=0, heapSize=279, lastSeqIds={}, endOfFile=false,usedBufferSize=279] 2024-11-21T00:27:41,384 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:41,387 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
2024-11-21T00:27:41,405 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 930, reset compression=false 2024-11-21T00:27:41,447 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:41,447 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1080, reset compression=false 2024-11-21T00:27:41,447 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[{test/b1acf010e5e0a45cfda5ab304dae89d4/8=[#edits: 2 = ],295}], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=1080, nbRowKeys=1, nbHFiles=0, heapSize=295, lastSeqIds={}, endOfFile=false,usedBufferSize=295] 2024-11-21T00:27:41,448 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:41,451 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
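The replicationSource thread names in these entries all carry the peer id "1" (e.g. "replicationSource,1-5ed4808ef0e6,..."), a peer that is registered on the source cluster before the rows are written. A minimal sketch of how such a peer is typically added through the Admin API is shown below; the ZooKeeper cluster key is a placeholder assumption and is not taken from this log.

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddReplicationPeer {
  // Registers peer "1" on the source cluster so its region servers start shipping WAL edits
  // to the sink cluster identified by the given cluster key.
  static void addPeer(Connection sourceConn, String sinkClusterKey) throws Exception {
    try (Admin admin = sourceConn.getAdmin()) {
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey(sinkClusterKey)   // e.g. "zkhost:2181:/hbase" for the sink cluster (placeholder)
          .build();
      // Peer id "1" matches the prefix seen in the replicationSource thread names above.
      admin.addReplicationPeer("1", peerConfig);
    }
  }
}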
2024-11-21T00:27:41,543 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 914, reset compression=false 2024-11-21T00:27:41,554 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:41,554 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1087, reset compression=false 2024-11-21T00:27:41,554 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=1087, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:41,632 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1063, reset compression=false 2024-11-21T00:27:41,654 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1080, reset compression=false 2024-11-21T00:27:41,775 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1087, reset compression=false 2024-11-21T00:27:41,947 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1063, reset compression=false 2024-11-21T00:27:41,974 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1080, reset compression=false 2024-11-21T00:27:42,084 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1087, reset compression=false 2024-11-21T00:27:42,201 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row1. IsDeleteReplication:true 2024-11-21T00:27:42,207 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row2. IsDeleteReplication:true 2024-11-21T00:27:42,364 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1063, reset compression=false 2024-11-21T00:27:42,401 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1080, reset compression=false 2024-11-21T00:27:42,428 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:42,428 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1239, reset compression=false 
2024-11-21T00:27:42,428 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[{test/b1acf010e5e0a45cfda5ab304dae89d4/9=[#edits: 2 = ],279}], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=1239, nbRowKeys=1, nbHFiles=0, heapSize=279, lastSeqIds={}, endOfFile=false,usedBufferSize=279] 2024-11-21T00:27:42,429 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:42,432 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:42,499 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1087, reset compression=false 2024-11-21T00:27:42,525 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:42,525 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1236, reset compression=false 2024-11-21T00:27:42,526 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[{test/69eb84528d7bfcc8bfbb1997d42d5d19/9=[#edits: 2 = ],295}], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=1236, nbRowKeys=1, nbHFiles=0, heapSize=295, lastSeqIds={}, endOfFile=false,usedBufferSize=295] 2024-11-21T00:27:42,527 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:42,530 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
2024-11-21T00:27:42,657 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1239, reset compression=false 2024-11-21T00:27:42,756 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1236, reset compression=false 2024-11-21T00:27:42,792 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:27:42,885 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1063, reset compression=false 2024-11-21T00:27:42,917 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:42,917 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1235, reset compression=false 2024-11-21T00:27:42,917 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=1235, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:42,987 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1239, reset compression=false 2024-11-21T00:27:43,089 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1236, reset compression=false 2024-11-21T00:27:43,141 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1235, reset compression=false 2024-11-21T00:27:43,209 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row2. IsDeleteReplication:true 2024-11-21T00:27:43,221 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33b913e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:43,221 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:43,221 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:43,222 DEBUG [HMaster-EventLoopGroup-35-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:43,222 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:43,222 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:43,223 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@685be9eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:43,223 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,46151,-1] 2024-11-21T00:27:43,223 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:43,223 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:43,225 INFO [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33272, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:43,226 DEBUG [RPCClient-NioEventLoopGroup-4-6 
{}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6499b9d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:43,229 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:43,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:43,230 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@75da664a 2024-11-21T00:27:43,231 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:43,232 INFO [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33282, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:43,233 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.HMaster(3992): Client=jenkins//172.17.0.2 disable replication peer, id=1 2024-11-21T00:27:43,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.DisablePeerProcedure 2024-11-21T00:27:43,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T00:27:43,297 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:27:43,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T00:27:43,426 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1239, reset compression=false 2024-11-21T00:27:43,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33687 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=13 2024-11-21T00:27:43,451 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=DISABLE_PEER 2024-11-21T00:27:43,452 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.ReplicationSourceManager(490): Terminate replication source for 1 2024-11-21T00:27:43,452 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,33687,1732148839767 because: Peer 1 state or config changed. 
Will close the previous replication source and open a new one 2024-11-21T00:27:43,452 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] client.AsyncConnectionImpl(233): Connection has been closed by RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0. 2024-11-21T00:27:43,452 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.refreshSources(ReplicationSourceManager.java:492) at org.apache.hadoop.hbase.replication.regionserver.PeerProcedureHandlerImpl.refreshPeerState(PeerProcedureHandlerImpl.java:89) at org.apache.hadoop.hbase.replication.regionserver.PeerProcedureHandlerImpl.disablePeer(PeerProcedureHandlerImpl.java:108) at org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable.doCall(RefreshPeerCallable.java:60) at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:43,452 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:43,452 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:43,452 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
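The "disable replication peer, id=1" request above runs as a DisablePeerProcedure (pid=12) whose RefreshPeerProcedure subprocedure (pid=13) makes each region server terminate and recreate its replication source, which is why the existing source logs "Closing source ... because: Peer 1 state or config changed". From a client's point of view the whole cascade is triggered by a single Admin call; the sketch below is a hedged illustration against the public client API, with a placeholder Configuration and the peer id "1" reused from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class PeerToggleSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // placeholder: point at the source cluster
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Pauses shipping for peer "1"; the source keeps tracking its WAL position.
                admin.disableReplicationPeer("1");
                // ... edits written while disabled stay in the source WALs ...
                admin.enableReplicationPeer("1"); // shipping resumes from the recorded position
            }
        }
    }

Edits written while the peer is disabled are not dropped: once the EnablePeerProcedure completes later in this log, the recreated source reopens the WAL at its saved startPosition and ships the pending entries.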
2024-11-21T00:27:43,473 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1235, reset compression=false 2024-11-21T00:27:43,517 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1236, reset compression=false 2024-11-21T00:27:43,553 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:27:43,553 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(123): Interrupted while waiting for next replication entry batch java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1681) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.poll(ReplicationSourceWALReader.java:309) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:109) ~[classes/:?] 
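The two WARN stack traces above are expected side effects of terminating the source: the wal-reader is interrupted out of its sleep between polls, the shipper is interrupted out of LinkedBlockingQueue.poll, and both treat InterruptedException as the stop signal rather than as an error. A small illustrative worker with the same interrupt-as-shutdown convention (the class and method names here are hypothetical):

    // Illustrative worker that, like the reader/shipper threads, uses interruption as its stop signal.
    public class InterruptibleWorker implements Runnable {
        @Override
        public void run() {
            try {
                while (!Thread.currentThread().isInterrupted()) {
                    doOneUnitOfWork();
                    Thread.sleep(1000); // throws InterruptedException when the owner terminates us
                }
            } catch (InterruptedException e) {
                // Expected during shutdown: restore the flag and fall through to cleanup.
                Thread.currentThread().interrupt();
            } finally {
                cleanup();
            }
        }

        private void doOneUnitOfWork() { /* read, ship, etc. */ }

        private void cleanup() { /* release resources */ }
    }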
2024-11-21T00:27:43,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T00:27:43,653 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 terminated 2024-11-21T00:27:43,673 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,33687,1732148839767, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:27:43,674 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.ReplicationSourceManager(500): Startup replication source for 1 2024-11-21T00:27:43,674 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-21T00:27:43,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-21T00:27:43,680 INFO [PEWorker-1 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for DISABLE on 5ed4808ef0e6,33687,1732148839767 suceeded 2024-11-21T00:27:43,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-21T00:27:43,684 INFO [PEWorker-3 {}] replication.DisablePeerProcedure(67): Successfully disabled peer 1 2024-11-21T00:27:43,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 383 msec 2024-11-21T00:27:43,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.DisablePeerProcedure in 451 msec 2024-11-21T00:27:43,692 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:42819' 2024-11-21T00:27:43,697 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3ddd5653, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:43,697 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42819,-1 for getting cluster id 2024-11-21T00:27:43,698 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:43,699 DEBUG [HMaster-EventLoopGroup-31-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7aafd06b-565f-4e2c-a825-a8d6ed1445cb' 2024-11-21T00:27:43,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] 
ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:43,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7aafd06b-565f-4e2c-a825-a8d6ed1445cb" 2024-11-21T00:27:43,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@64734100, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:43,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42819,-1] 2024-11-21T00:27:43,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:43,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:43,701 INFO [HMaster-EventLoopGroup-31-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36678, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:43,702 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@714efcf3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:43,702 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:43,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:43,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1a3219f5 2024-11-21T00:27:43,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-36-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:43,705 INFO [HMaster-EventLoopGroup-31-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36682, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=MasterService 2024-11-21T00:27:43,707 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,33687,1732148839767 (queues=1) is replicating from cluster=46c96938-ca9c-4723-82fe-8a99d3a89209 to cluster=7aafd06b-565f-4e2c-a825-a8d6ed1445cb 2024-11-21T00:27:43,707 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C33687%2C1732148839767 2024-11-21T00:27:43,707 INFO 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,33687,1732148839767, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:27:43,731 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C33687%2C1732148839767 2024-11-21T00:27:43,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T00:27:43,868 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: DISABLE_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:27:43,869 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:27:43,869 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.disablePeer(TestMasterReplication.java:639) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:274) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:43,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:43,869 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:43,869 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:43,875 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row3. IsDeleteReplication:false 2024-11-21T00:27:43,903 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1235, reset compression=false 2024-11-21T00:27:44,040 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1236, reset compression=false 2024-11-21T00:27:44,051 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:44,051 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1340, reset compression=false 2024-11-21T00:27:44,051 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[{test/69eb84528d7bfcc8bfbb1997d42d5d19/10=[#edits: 1 = ],207}], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=1340, nbRowKeys=1, nbHFiles=0, heapSize=207, lastSeqIds={}, endOfFile=false,usedBufferSize=207] 2024-11-21T00:27:44,052 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] 
regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:44,055 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=44369 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:44,221 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:27:44,266 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1340, reset compression=false 2024-11-21T00:27:44,430 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1235, reset compression=false 2024-11-21T00:27:44,466 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:44,466 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1362, reset compression=false 2024-11-21T00:27:44,466 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[{test/fa18a0bdbdf880445723239afb964dd9/10=[#edits: 1 = ],223}], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=1362, nbRowKeys=1, nbHFiles=0, heapSize=223, lastSeqIds={}, endOfFile=false,usedBufferSize=223] 2024-11-21T00:27:44,467 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:44,469 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
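"Waiting for more time for replication. Row:row3" followed later by "Obtained row:row3" is the test polling the destination cluster until the replicated row becomes visible. Below is a hedged sketch of such a wait loop using the public client API; the table name "test" matches the regions named in this log, while the timeout, helper name and sleep interval are placeholders.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WaitForReplicationSketch {
        /** Polls the sink cluster until the row appears or the deadline passes. */
        static boolean waitForRow(Connection sinkConn, String row, long timeoutMs) throws Exception {
            long deadline = System.currentTimeMillis() + timeoutMs;
            try (Table table = sinkConn.getTable(TableName.valueOf("test"))) {
                while (System.currentTimeMillis() < deadline) {
                    Result r = table.get(new Get(Bytes.toBytes(row)));
                    if (!r.isEmpty()) {
                        return true;       // replication caught up
                    }
                    Thread.sleep(1000);    // give the shipper another round
                }
            }
            return false;
        }
    }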
2024-11-21T00:27:44,604 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1340, reset compression=false 2024-11-21T00:27:44,712 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1362, reset compression=false 2024-11-21T00:27:44,879 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row3. IsDeleteReplication:false 2024-11-21T00:27:44,890 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64f78316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:44,890 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,46151,-1 for getting cluster id 2024-11-21T00:27:44,890 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:44,891 DEBUG [HMaster-EventLoopGroup-35-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '46c96938-ca9c-4723-82fe-8a99d3a89209' 2024-11-21T00:27:44,891 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:44,891 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "46c96938-ca9c-4723-82fe-8a99d3a89209" 2024-11-21T00:27:44,891 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5211bfce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:44,891 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,46151,-1] 2024-11-21T00:27:44,892 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:44,892 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:44,893 INFO [HMaster-EventLoopGroup-35-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33308, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:44,893 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c66fb65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:44,896 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:27:44,897 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:44,897 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@11a2465 2024-11-21T00:27:44,898 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:27:44,899 INFO [HMaster-EventLoopGroup-35-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33324, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:27:44,900 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.HMaster(3986): Client=jenkins//172.17.0.2 enable replication peer, id=1 2024-11-21T00:27:44,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] procedure2.ProcedureExecutor(1139): Stored pid=14, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.EnablePeerProcedure 2024-11-21T00:27:44,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=14 2024-11-21T00:27:44,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:27:45,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=14 2024-11-21T00:27:45,041 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1362, reset compression=false 2024-11-21T00:27:45,044 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1340, reset compression=false 2024-11-21T00:27:45,066 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:45,066 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:45,066 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[{test/fa18a0bdbdf880445723239afb964dd9/11=[#edits: 1 = ],207}], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=1466, nbRowKeys=1, nbHFiles=0, heapSize=207, lastSeqIds={}, endOfFile=false,usedBufferSize=207] 2024-11-21T00:27:45,068 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:45,073 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33687 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:27:45,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33687 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=15 2024-11-21T00:27:45,131 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=15}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ENABLE_PEER 2024-11-21T00:27:45,136 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-21T00:27:45,136 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, startPosition=1239, beingWritten=true 2024-11-21T00:27:45,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-11-21T00:27:45,139 INFO [PEWorker-4 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ENABLE on 5ed4808ef0e6,33687,1732148839767 suceeded 2024-11-21T00:27:45,142 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-11-21T00:27:45,142 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 168 msec 2024-11-21T00:27:45,143 INFO [PEWorker-4 {}] replication.EnablePeerProcedure(67): Successfully enabled peer 1 2024-11-21T00:27:45,144 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.EnablePeerProcedure in 243 msec 2024-11-21T00:27:45,170 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:27:45,170 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1518, reset compression=false 2024-11-21T00:27:45,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46151 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=14 2024-11-21T00:27:45,228 INFO [RPCClient-NioEventLoopGroup-4-10 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ENABLE_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:27:45,228 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:27:45,228 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.enablePeer(TestMasterReplication.java:646) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:281) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:45,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:45,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:45,228 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:45,231 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row4. IsDeleteReplication:false 2024-11-21T00:27:45,236 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[{test/b1acf010e5e0a45cfda5ab304dae89d4/11=[#edits: 1 = ],223}], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=1518, nbRowKeys=1, nbHFiles=0, heapSize=223, lastSeqIds={}, endOfFile=false,usedBufferSize=223] 2024-11-21T00:27:45,239 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:45,240 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-32-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37554, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.16 (auth:SIMPLE), service=AdminService 2024-11-21T00:27:45,240 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:27:45,245 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=33853 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
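On the receiving side, the shipper opens an AdminService connection to a peer region server and the ReplicationSink logs "Started/Finished replicating mutations": the shipped WAL edits are re-applied through the normal client write path of the sink cluster. A rough conceptual sketch of that apply step follows; the real ReplicationSink additionally groups edits per table, handles bulk-loaded HFiles (nbHFiles in the batches above), and adds retries and metrics.

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;

    public class SinkApplySketch {
        /**
         * Conceptual version of what a replication sink does with one shipped batch:
         * apply the Puts/Deletes for a table through the ordinary client write path.
         */
        static void applyBatch(Connection localConn, TableName tableName, List<Row> mutations)
            throws Exception {
            try (Table table = localConn.getTable(tableName)) {
                Object[] results = new Object[mutations.size()];
                table.batch(mutations, results); // mutations reconstructed from the shipped WAL edits
            }
        }
    }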
2024-11-21T00:27:45,297 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:45,400 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1518, reset compression=false 2024-11-21T00:27:45,577 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1340, reset compression=false 2024-11-21T00:27:45,605 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:45,605 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:45,605 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=1491, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:45,612 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:45,746 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1518, reset compression=false 2024-11-21T00:27:45,828 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:46,057 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:46,161 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:46,179 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1518, reset compression=false 2024-11-21T00:27:46,233 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row4. IsDeleteReplication:false 2024-11-21T00:27:46,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:27:46,234 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:27:46,234 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:288) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:46,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:46,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:46,234 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:27:46,234 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1108890809, stopped=false 2024-11-21T00:27:46,234 INFO [Time-limited 
test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,46151,1732148839233 2024-11-21T00:27:46,240 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:46,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-559595819/running 2024-11-21T00:27:46,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-559595819/running 2024-11-21T00:27:46,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:46,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:46,348 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:27:46,349 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:27:46,349 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:288) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:46,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:46,349 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,33687,1732148839767' ***** 2024-11-21T00:27:46,349 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:27:46,349 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on znode that does not yet exist, /2-559595819/running 2024-11-21T00:27:46,349 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Set watcher on znode that does not yet exist, /2-559595819/running 2024-11-21T00:27:46,351 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:27:46,351 INFO [RS:0;5ed4808ef0e6:33687 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:27:46,351 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:27:46,351 INFO [RS:0;5ed4808ef0e6:33687 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:27:46,351 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(3091): Received CLOSE for b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(3091): Received CLOSE for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33687. 
2024-11-21T00:27:46,361 DEBUG [RS:0;5ed4808ef0e6:33687 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:46,361 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:27:46,361 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:27:46,364 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b1acf010e5e0a45cfda5ab304dae89d4, disabling compactions & flushes 2024-11-21T00:27:46,364 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:46,364 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:46,364 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. after waiting 0 ms 2024-11-21T00:27:46,364 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 
2024-11-21T00:27:46,367 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing b1acf010e5e0a45cfda5ab304dae89d4 3/3 column families, dataSize=342 B heapSize=1.99 KB 2024-11-21T00:27:46,368 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:27:46,368 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1325): Online Regions={b1acf010e5e0a45cfda5ab304dae89d4=test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4., 1588230740=hbase:meta,,1.1588230740, 8c770a4c46c01e4bd8e16042c47ed8e3=hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3.} 2024-11-21T00:27:46,368 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8c770a4c46c01e4bd8e16042c47ed8e3, b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:46,369 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:46,369 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:46,369 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:27:46,369 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:46,369 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:46,369 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:27:46,427 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/info/464cb78ef6904b5b8206065602563b68 is 147, key is hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3./info:regioninfo/1732148852819/Put/seqid=0 2024-11-21T00:27:46,429 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/f/a262ed5f505d48a8b0d172d3a10ff20b is 37, key is row3/f:row3/1732148863873/Put/seqid=0 2024-11-21T00:27:46,433 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:27:46,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741840_1016 (size=7686) 2024-11-21T00:27:46,483 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/info/464cb78ef6904b5b8206065602563b68 2024-11-21T00:27:46,484 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741841_1017 (size=5228) 2024-11-21T00:27:46,486 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=236 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/f/a262ed5f505d48a8b0d172d3a10ff20b 2024-11-21T00:27:46,493 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a262ed5f505d48a8b0d172d3a10ff20b 2024-11-21T00:27:46,544 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/f1/13743ac6be494cbcaa46b8d01cddf9d4 is 30, key is row1/f1:/1732148861190/DeleteFamily/seqid=0 2024-11-21T00:27:46,582 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8c770a4c46c01e4bd8e16042c47ed8e3, b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:46,584 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:46,587 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/ns/cbaa0ef6789a43d9be001b5c729fda9d is 43, key is default/ns:d/1732148841923/Put/seqid=0 2024-11-21T00:27:46,589 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:46,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741842_1018 (size=5158) 2024-11-21T00:27:46,688 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1518, reset compression=false 2024-11-21T00:27:46,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741843_1019 (size=5153) 
2024-11-21T00:27:46,698 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/ns/cbaa0ef6789a43d9be001b5c729fda9d 2024-11-21T00:27:46,734 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/rep_barrier/300a8e4c447541edb7ab3aa7fe73bf98 is 112, key is test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4./rep_barrier:seqnumDuringOpen/1732148846488/Put/seqid=0 2024-11-21T00:27:46,738 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:46,738 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33687,1732148839767 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942, lastWalPosition=1765, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:46,738 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1765, reset compression=false 2024-11-21T00:27:46,739 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,33687,1732148839767: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,33687,1732148839767, walGroup=5ed4808ef0e6%2C33687%2C1732148839767, offset=5ed4808ef0e6%2C33687%2C1732148839767.1732148840942:1765, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:33687 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:27:46,741 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:27:46,741 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:27:46,742 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:27:46,742 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:27:46,742 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:27:46,742 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": 
"sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1319108608, "init": 1048576000, "max": 2306867200, "used": 819766784 }, "NonHeapMemoryUsage": { "committed": 202571776, "init": 7667712, "max": -1, "used": 199533000 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:27:46,743 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:46151 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:46151 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-21T00:27:46,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741844_1020 (size=5518) 2024-11-21T00:27:46,779 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/rep_barrier/300a8e4c447541edb7ab3aa7fe73bf98 2024-11-21T00:27:46,782 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8c770a4c46c01e4bd8e16042c47ed8e3, b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:46,858 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/table/55eeba8009e04d789112fb39b8b7be34 is 53, key is hbase:replication/table:state/1732148852861/Put/seqid=0 2024-11-21T00:27:46,884 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:27:46,884 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:27:46,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741845_1021 (size=5308) 2024-11-21T00:27:46,898 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/table/55eeba8009e04d789112fb39b8b7be34 2024-11-21T00:27:46,907 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/info/464cb78ef6904b5b8206065602563b68 as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/info/464cb78ef6904b5b8206065602563b68 2024-11-21T00:27:46,919 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/info/464cb78ef6904b5b8206065602563b68, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:27:46,921 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/ns/cbaa0ef6789a43d9be001b5c729fda9d as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/ns/cbaa0ef6789a43d9be001b5c729fda9d 2024-11-21T00:27:46,929 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/ns/cbaa0ef6789a43d9be001b5c729fda9d, entries=2, sequenceid=16, filesize=5.0 K 
2024-11-21T00:27:46,930 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/rep_barrier/300a8e4c447541edb7ab3aa7fe73bf98 as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/rep_barrier/300a8e4c447541edb7ab3aa7fe73bf98 2024-11-21T00:27:46,936 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/rep_barrier/300a8e4c447541edb7ab3aa7fe73bf98, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:27:46,938 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/.tmp/table/55eeba8009e04d789112fb39b8b7be34 as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/table/55eeba8009e04d789112fb39b8b7be34 2024-11-21T00:27:46,944 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/table/55eeba8009e04d789112fb39b8b7be34, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:27:46,945 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 576ms, sequenceid=16, compaction requested=false 2024-11-21T00:27:46,983 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8c770a4c46c01e4bd8e16042c47ed8e3, b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:46,998 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=77 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/f1/13743ac6be494cbcaa46b8d01cddf9d4 2024-11-21T00:27:47,007 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 13743ac6be494cbcaa46b8d01cddf9d4 2024-11-21T00:27:47,019 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:27:47,020 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:27:47,020 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:27:47,020 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:47,020 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148866369Running coprocessor pre-close hooks at 1732148866369Disabling compacts and flushes for region at 1732148866369Disabling writes for close at 1732148866369Obtaining lock to block concurrent updates at 1732148866369Preparing flush snapshotting stores in 1588230740 at 1732148866369Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148866370 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148866370Flushing 1588230740/info: creating writer at 1732148866370Flushing 1588230740/info: appending metadata at 1732148866421 (+51 ms)Flushing 1588230740/info: closing flushed file at 1732148866421Flushing 1588230740/ns: creating writer at 1732148866504 (+83 ms)Flushing 1588230740/ns: appending metadata at 1732148866586 (+82 ms)Flushing 1588230740/ns: closing flushed file at 1732148866586Flushing 1588230740/rep_barrier: creating writer at 1732148866702 (+116 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148866734 (+32 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148866734Flushing 1588230740/table: creating writer at 1732148866784 (+50 ms)Flushing 1588230740/table: appending metadata at 1732148866857 (+73 ms)Flushing 1588230740/table: closing flushed file at 1732148866857Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24c35ed4: reopening flushed file at 1732148866906 (+49 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c95cf05: reopening flushed file at 1732148866920 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6104e545: reopening flushed file at 1732148866929 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27f5d9ce: reopening flushed file at 1732148866936 (+7 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 576ms, sequenceid=16, compaction requested=false at 1732148866945 (+9 ms)Writing region close event to WAL at 1732148867004 (+59 ms)Running coprocessor post-close hooks at 1732148867020 (+16 ms)Closed at 1732148867020 2024-11-21T00:27:47,020 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:47,039 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1765, reset compression=false 2024-11-21T00:27:47,040 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/norep/775f6983cf1d432e921d67b7fdfbfcec is 33, key is row2/norep:/1732148862202/DeleteFamily/seqid=0 
2024-11-21T00:27:47,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741846_1022 (size=5108) 2024-11-21T00:27:47,117 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/norep/775f6983cf1d432e921d67b7fdfbfcec 2024-11-21T00:27:47,135 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 775f6983cf1d432e921d67b7fdfbfcec 2024-11-21T00:27:47,136 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/f/a262ed5f505d48a8b0d172d3a10ff20b as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/f/a262ed5f505d48a8b0d172d3a10ff20b 2024-11-21T00:27:47,142 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a262ed5f505d48a8b0d172d3a10ff20b 2024-11-21T00:27:47,142 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/f/a262ed5f505d48a8b0d172d3a10ff20b, entries=5, sequenceid=12, filesize=5.1 K 2024-11-21T00:27:47,143 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/f1/13743ac6be494cbcaa46b8d01cddf9d4 as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/f1/13743ac6be494cbcaa46b8d01cddf9d4 2024-11-21T00:27:47,151 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:47,155 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 13743ac6be494cbcaa46b8d01cddf9d4 2024-11-21T00:27:47,155 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/f1/13743ac6be494cbcaa46b8d01cddf9d4, entries=3, sequenceid=12, filesize=5.0 K 2024-11-21T00:27:47,156 DEBUG 
[RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/.tmp/norep/775f6983cf1d432e921d67b7fdfbfcec as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/norep/775f6983cf1d432e921d67b7fdfbfcec 2024-11-21T00:27:47,168 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 775f6983cf1d432e921d67b7fdfbfcec 2024-11-21T00:27:47,168 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/norep/775f6983cf1d432e921d67b7fdfbfcec, entries=1, sequenceid=12, filesize=5.0 K 2024-11-21T00:27:47,169 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~342 B/342, heapSize ~1.95 KB/1992, currentSize=0 B/0 for b1acf010e5e0a45cfda5ab304dae89d4 in 805ms, sequenceid=12, compaction requested=false 2024-11-21T00:27:47,184 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1351): Waiting on 8c770a4c46c01e4bd8e16042c47ed8e3, b1acf010e5e0a45cfda5ab304dae89d4 2024-11-21T00:27:47,255 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/data/default/test/b1acf010e5e0a45cfda5ab304dae89d4/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-11-21T00:27:47,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:27:47,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:27:47,256 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:47,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b1acf010e5e0a45cfda5ab304dae89d4: Waiting for close lock at 1732148866363Running coprocessor pre-close hooks at 1732148866363Disabling compacts and flushes for region at 1732148866363Disabling writes for close at 1732148866364 (+1 ms)Obtaining lock to block concurrent updates at 1732148866367 (+3 ms)Preparing flush snapshotting stores in b1acf010e5e0a45cfda5ab304dae89d4 at 1732148866367Finished memstore snapshotting test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4., syncing WAL and waiting on mvcc, flushsize=dataSize=342, getHeapSize=1992, getOffHeapSize=0, getCellsCount=12 at 1732148866368 (+1 ms)Flushing stores of test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 
at 1732148866369 (+1 ms)Flushing b1acf010e5e0a45cfda5ab304dae89d4/f: creating writer at 1732148866369Flushing b1acf010e5e0a45cfda5ab304dae89d4/f: appending metadata at 1732148866419 (+50 ms)Flushing b1acf010e5e0a45cfda5ab304dae89d4/f: closing flushed file at 1732148866419Flushing b1acf010e5e0a45cfda5ab304dae89d4/f1: creating writer at 1732148866493 (+74 ms)Flushing b1acf010e5e0a45cfda5ab304dae89d4/f1: appending metadata at 1732148866543 (+50 ms)Flushing b1acf010e5e0a45cfda5ab304dae89d4/f1: closing flushed file at 1732148866543Flushing b1acf010e5e0a45cfda5ab304dae89d4/norep: creating writer at 1732148867007 (+464 ms)Flushing b1acf010e5e0a45cfda5ab304dae89d4/norep: appending metadata at 1732148867039 (+32 ms)Flushing b1acf010e5e0a45cfda5ab304dae89d4/norep: closing flushed file at 1732148867039Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@175c0195: reopening flushed file at 1732148867135 (+96 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70ee595e: reopening flushed file at 1732148867143 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@efa1ca9: reopening flushed file at 1732148867155 (+12 ms)Finished flush of dataSize ~342 B/342, heapSize ~1.95 KB/1992, currentSize=0 B/0 for b1acf010e5e0a45cfda5ab304dae89d4 in 805ms, sequenceid=12, compaction requested=false at 1732148867169 (+14 ms)Writing region close event to WAL at 1732148867203 (+34 ms)Running coprocessor post-close hooks at 1732148867256 (+53 ms)Closed at 1732148867256 2024-11-21T00:27:47,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148845340.b1acf010e5e0a45cfda5ab304dae89d4. 2024-11-21T00:27:47,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8c770a4c46c01e4bd8e16042c47ed8e3, disabling compactions & flushes 2024-11-21T00:27:47,256 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:47,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:47,256 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. after waiting 0 ms 2024-11-21T00:27:47,257 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 
2024-11-21T00:27:47,257 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8c770a4c46c01e4bd8e16042c47ed8e3: Waiting for close lock at 1732148867256Running coprocessor pre-close hooks at 1732148867256Disabling compacts and flushes for region at 1732148867256Disabling writes for close at 1732148867257 (+1 ms)Failed flush hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3., putting online again at 1732148867257 2024-11-21T00:27:47,257 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2435): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:47,277 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:47,384 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:27:47,384 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1325): Online Regions={8c770a4c46c01e4bd8e16042c47ed8e3=hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3.} 2024-11-21T00:27:47,384 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(3091): Received CLOSE for 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:47,384 DEBUG [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1351): Waiting on 8c770a4c46c01e4bd8e16042c47ed8e3 2024-11-21T00:27:47,384 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8c770a4c46c01e4bd8e16042c47ed8e3, disabling compactions & flushes 2024-11-21T00:27:47,384 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:47,384 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:47,384 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. after waiting 0 ms 2024-11-21T00:27:47,384 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 
2024-11-21T00:27:47,384 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8c770a4c46c01e4bd8e16042c47ed8e3: Waiting for close lock at 1732148867384Running coprocessor pre-close hooks at 1732148867384Disabling compacts and flushes for region at 1732148867384Disabling writes for close at 1732148867384Failed flush hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3., putting online again at 1732148867384 2024-11-21T00:27:47,385 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2435): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:replication,,1732148851912.8c770a4c46c01e4bd8e16042c47ed8e3. 2024-11-21T00:27:47,387 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 1765, reset compression=false 2024-11-21T00:27:47,419 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:47,419 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.wal-reader.5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/WALs/5ed4808ef0e6,33687,1732148839767/5ed4808ef0e6%2C33687%2C1732148839767.1732148840942 to pos 2409, reset compression=false 2024-11-21T00:27:47,584 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1346): We were exiting though online regions are not empty, because some regions failed closing 2024-11-21T00:27:47,584 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,33687,1732148839767; all regions closed. 
2024-11-21T00:27:47,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:27:47,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741839_1015 (size=2390) 2024-11-21T00:27:47,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741832_1008 (size=2417) 2024-11-21T00:27:47,618 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:47,618 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:27:47,619 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:27:47,619 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:27:47,619 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:27:47,619 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:27:47,619 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,33687,1732148839767 because: Region server is closing 2024-11-21T00:27:47,620 INFO [RS:0;5ed4808ef0e6:33687 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33687. 
2024-11-21T00:27:47,620 DEBUG [RS:0;5ed4808ef0e6:33687 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:47,620 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:47,620 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:47,620 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:47,720 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33687,1732148839767.replicationSource.shipper5ed4808ef0e6%2C33687%2C1732148839767,1-5ed4808ef0e6,33687,1732148839767 terminated 2024-11-21T00:27:47,721 INFO [RS:0;5ed4808ef0e6:33687 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33687. 
2024-11-21T00:27:47,721 DEBUG [RS:0;5ed4808ef0e6:33687 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:47,721 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:47,721 DEBUG [RS:0;5ed4808ef0e6:33687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:47,721 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:27:47,721 INFO [RS:0;5ed4808ef0e6:33687 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33687 2024-11-21T00:27:47,729 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:47,768 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:47,804 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:27:47,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-559595819/rs/5ed4808ef0e6,33687,1732148839767 2024-11-21T00:27:47,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819/rs 2024-11-21T00:27:47,805 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,33687,1732148839767] 2024-11-21T00:27:47,822 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /2-559595819/draining/5ed4808ef0e6,33687,1732148839767 already deleted, retry=false 2024-11-21T00:27:47,822 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,33687,1732148839767 expired; onlineServers=0 2024-11-21T00:27:47,822 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,46151,1732148839233' ***** 2024-11-21T00:27:47,822 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:27:47,822 INFO [M:0;5ed4808ef0e6:46151 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:27:47,822 INFO [M:0;5ed4808ef0e6:46151 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:27:47,822 DEBUG [M:0;5ed4808ef0e6:46151 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:27:47,823 DEBUG [M:0;5ed4808ef0e6:46151 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:27:47,823 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:27:47,823 INFO [M:0;5ed4808ef0e6:46151 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:27:47,823 INFO [M:0;5ed4808ef0e6:46151 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:27:47,823 DEBUG [M:0;5ed4808ef0e6:46151 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:27:47,823 INFO [M:0;5ed4808ef0e6:46151 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:27:47,823 INFO [M:0;5ed4808ef0e6:46151 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:27:47,823 INFO [M:0;5ed4808ef0e6:46151 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:27:47,823 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148840692 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148840692,5,FailOnTimeoutGroup] 2024-11-21T00:27:47,823 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:27:47,824 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148840688 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148840688,5,FailOnTimeoutGroup] 2024-11-21T00:27:47,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-559595819/master 2024-11-21T00:27:47,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-559595819 2024-11-21T00:27:47,871 DEBUG [M:0;5ed4808ef0e6:46151 {}] zookeeper.ZKUtil(347): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Unable to get data of znode /2-559595819/master because node does not exist (not an error) 2024-11-21T00:27:47,871 WARN [M:0;5ed4808ef0e6:46151 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:27:47,872 INFO [M:0;5ed4808ef0e6:46151 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/.lastflushedseqids 2024-11-21T00:27:47,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741847_1023 (size=245) 2024-11-21T00:27:47,886 INFO [M:0;5ed4808ef0e6:46151 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:27:47,887 INFO [M:0;5ed4808ef0e6:46151 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:27:47,887 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:27:47,887 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:27:47,887 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:47,887 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:27:47,887 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:47,887 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=63.59 KB heapSize=75.51 KB 2024-11-21T00:27:47,906 DEBUG [M:0;5ed4808ef0e6:46151 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c03d10d6d1f844909db77ddcd5df6f26 is 82, key is hbase:meta,,1/info:regioninfo/1732148841903/Put/seqid=0 2024-11-21T00:27:47,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:47,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33687-0x1015ac95b3a0007, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:47,912 INFO [RS:0;5ed4808ef0e6:33687 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:27:47,912 INFO [RS:0;5ed4808ef0e6:33687 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,33687,1732148839767; zookeeper connection closed. 
2024-11-21T00:27:47,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741848_1024 (size=5672) 2024-11-21T00:27:47,916 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c03d10d6d1f844909db77ddcd5df6f26 2024-11-21T00:27:47,916 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3fb5f870 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3fb5f870 2024-11-21T00:27:47,917 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:27:47,944 DEBUG [M:0;5ed4808ef0e6:46151 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af4bdd92de4548608e662ee0fa305948 is 1480, key is \x00\x00\x00\x00\x00\x00\x00\x08/proc:d/1732148852870/Put/seqid=0 2024-11-21T00:27:47,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741849_1025 (size=9213) 2024-11-21T00:27:47,948 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=63.04 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af4bdd92de4548608e662ee0fa305948 2024-11-21T00:27:47,972 DEBUG [M:0;5ed4808ef0e6:46151 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6e96a77e8e9f4d639224bd63109af614 is 69, key is 5ed4808ef0e6,33687,1732148839767/rs:state/1732148840737/Put/seqid=0 2024-11-21T00:27:47,982 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:47,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741850_1026 (size=5156) 2024-11-21T00:27:47,994 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6e96a77e8e9f4d639224bd63109af614 2024-11-21T00:27:48,000 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c03d10d6d1f844909db77ddcd5df6f26 as 
hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c03d10d6d1f844909db77ddcd5df6f26 2024-11-21T00:27:48,009 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c03d10d6d1f844909db77ddcd5df6f26, entries=8, sequenceid=127, filesize=5.5 K 2024-11-21T00:27:48,010 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/af4bdd92de4548608e662ee0fa305948 as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/af4bdd92de4548608e662ee0fa305948 2024-11-21T00:27:48,017 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/af4bdd92de4548608e662ee0fa305948, entries=15, sequenceid=127, filesize=9.0 K 2024-11-21T00:27:48,018 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6e96a77e8e9f4d639224bd63109af614 as hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6e96a77e8e9f4d639224bd63109af614 2024-11-21T00:27:48,024 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45089/user/jenkins/test-data/b0912622-2238-e963-510d-ad4b62f1f09e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6e96a77e8e9f4d639224bd63109af614, entries=1, sequenceid=127, filesize=5.0 K 2024-11-21T00:27:48,025 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(3140): Finished flush of dataSize ~63.59 KB/65121, heapSize ~75.21 KB/77016, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=127, compaction requested=false 2024-11-21T00:27:48,040 INFO [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:48,041 DEBUG [M:0;5ed4808ef0e6:46151 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148867887Disabling compacts and flushes for region at 1732148867887Disabling writes for close at 1732148867887Obtaining lock to block concurrent updates at 1732148867887Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148867887Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=65121, getHeapSize=77256, getOffHeapSize=0, getCellsCount=148 at 1732148867887Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148867888 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148867888Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148867906 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148867906Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148867920 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148867944 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148867944Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148867953 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148867969 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148867969Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51baef3d: reopening flushed file at 1732148867999 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2653e314: reopening flushed file at 1732148868009 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f62ba8a: reopening flushed file at 1732148868017 (+8 ms)Finished flush of dataSize ~63.59 KB/65121, heapSize ~75.21 KB/77016, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 138ms, sequenceid=127, compaction requested=false at 1732148868025 (+8 ms)Writing region close event to WAL at 1732148868040 (+15 ms)Closed at 1732148868040 2024-11-21T00:27:48,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38013 is added to blk_1073741830_1006 (size=73878) 2024-11-21T00:27:48,044 INFO [M:0;5ed4808ef0e6:46151 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:27:48,044 INFO [M:0;5ed4808ef0e6:46151 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46151 2024-11-21T00:27:48,044 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-21T00:27:48,048 INFO [M:0;5ed4808ef0e6:46151 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:27:48,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:48,175 INFO [M:0;5ed4808ef0e6:46151 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:27:48,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46151-0x1015ac95b3a0006, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:48,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2adf0652{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:27:48,179 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f055b0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:27:48,179 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:27:48,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79874c3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:27:48,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70d4617e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.log.dir/,STOPPED} 2024-11-21T00:27:48,181 WARN [BP-651598342-172.17.0.2-1732148836043 heartbeating to localhost/127.0.0.1:45089 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:27:48,181 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:27:48,181 WARN [BP-651598342-172.17.0.2-1732148836043 heartbeating to localhost/127.0.0.1:45089 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-651598342-172.17.0.2-1732148836043 (Datanode Uuid 47717199-df6d-4f3b-80f3-c504122bf7c4) service to localhost/127.0.0.1:45089 2024-11-21T00:27:48,181 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:27:48,182 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/cluster_86413a8d-8fa4-d205-de8d-389e28773bd9/data/data1/current/BP-651598342-172.17.0.2-1732148836043 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:27:48,182 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/cluster_86413a8d-8fa4-d205-de8d-389e28773bd9/data/data2/current/BP-651598342-172.17.0.2-1732148836043 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:27:48,183 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:27:48,191 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@141afe7a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:27:48,192 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f702d45{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:27:48,192 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:27:48,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dcb40b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:27:48,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fec52ff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.log.dir/,STOPPED} 2024-11-21T00:27:48,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:27:48,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:27:48,208 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:27:48,208 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:288) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:48,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:48,208 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:48,208 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:27:48,209 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:27:48,209 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1018356498, stopped=false 2024-11-21T00:27:48,209 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,41951,1732148832855 2024-11-21T00:27:48,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-2137408572/running 2024-11-21T00:27:48,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-2137408572/running 2024-11-21T00:27:48,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:48,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:48,222 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:27:48,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on znode that does not yet exist, /1-2137408572/running 2024-11-21T00:27:48,223 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:27:48,223 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Set watcher on znode that does not yet exist, /1-2137408572/running 2024-11-21T00:27:48,223 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:288) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:48,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:48,224 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,44369,1732148833096' ***** 2024-11-21T00:27:48,224 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:27:48,224 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:27:48,224 INFO [RS:0;5ed4808ef0e6:44369 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:27:48,224 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:27:48,225 INFO [RS:0;5ed4808ef0e6:44369 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:27:48,225 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(3091): Received CLOSE for fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:48,225 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(3091): Received CLOSE for 7dc046db6be22eef18a273b77c92911e 2024-11-21T00:27:48,225 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:48,225 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:27:48,225 INFO [RS:0;5ed4808ef0e6:44369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:44369. 2024-11-21T00:27:48,225 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing fa18a0bdbdf880445723239afb964dd9, disabling compactions & flushes 2024-11-21T00:27:48,225 DEBUG [RS:0;5ed4808ef0e6:44369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:48,225 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 
2024-11-21T00:27:48,225 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:48,225 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:48,225 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. after waiting 0 ms 2024-11-21T00:27:48,225 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:48,225 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:27:48,225 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing fa18a0bdbdf880445723239afb964dd9 3/3 column families, dataSize=342 B heapSize=1.99 KB 2024-11-21T00:27:48,228 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:27:48,228 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:27:48,228 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:27:48,228 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:27:48,228 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1325): Online Regions={fa18a0bdbdf880445723239afb964dd9=test,,1732148843191.fa18a0bdbdf880445723239afb964dd9., 7dc046db6be22eef18a273b77c92911e=hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:27:48,228 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7dc046db6be22eef18a273b77c92911e, fa18a0bdbdf880445723239afb964dd9 2024-11-21T00:27:48,229 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:48,229 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:48,229 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:27:48,229 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:48,229 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:48,229 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:27:48,245 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/f/c062aa7fdbe946eca2a661c780bdcb38 is 37, key is row3/f:row3/1732148863873/Put/seqid=0 2024-11-21T00:27:48,248 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/info/2171a69fc60d4e1284b5f0e9e71356e4 is 147, key is hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e./info:regioninfo/1732148850329/Put/seqid=0 2024-11-21T00:27:48,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741840_1016 (size=5228) 2024-11-21T00:27:48,250 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=236 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/f/c062aa7fdbe946eca2a661c780bdcb38 2024-11-21T00:27:48,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741841_1017 (size=7686) 2024-11-21T00:27:48,258 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c062aa7fdbe946eca2a661c780bdcb38 2024-11-21T00:27:48,277 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/f1/7f10a58f16594572955a06cd5def0491 is 30, key is row1/f1:/1732148861190/DeleteFamily/seqid=0 2024-11-21T00:27:48,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741842_1018 (size=5158) 2024-11-21T00:27:48,283 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:27:48,290 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=77 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/f1/7f10a58f16594572955a06cd5def0491 2024-11-21T00:27:48,295 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7f10a58f16594572955a06cd5def0491 2024-11-21T00:27:48,314 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/norep/bc494d9d284b40ef8d4a527939993cb6 is 33, key is row1/norep:/1732148861190/DeleteFamily/seqid=0 2024-11-21T00:27:48,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741843_1019 (size=5108) 
2024-11-21T00:27:48,321 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/norep/bc494d9d284b40ef8d4a527939993cb6 2024-11-21T00:27:48,325 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for bc494d9d284b40ef8d4a527939993cb6 2024-11-21T00:27:48,326 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/f/c062aa7fdbe946eca2a661c780bdcb38 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/f/c062aa7fdbe946eca2a661c780bdcb38 2024-11-21T00:27:48,330 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c062aa7fdbe946eca2a661c780bdcb38 2024-11-21T00:27:48,330 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/f/c062aa7fdbe946eca2a661c780bdcb38, entries=5, sequenceid=12, filesize=5.1 K 2024-11-21T00:27:48,330 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/f1/7f10a58f16594572955a06cd5def0491 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/f1/7f10a58f16594572955a06cd5def0491 2024-11-21T00:27:48,334 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7f10a58f16594572955a06cd5def0491 2024-11-21T00:27:48,334 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/f1/7f10a58f16594572955a06cd5def0491, entries=3, sequenceid=12, filesize=5.0 K 2024-11-21T00:27:48,335 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/.tmp/norep/bc494d9d284b40ef8d4a527939993cb6 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/norep/bc494d9d284b40ef8d4a527939993cb6 2024-11-21T00:27:48,339 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom 
(CompoundBloomFilter) metadata for bc494d9d284b40ef8d4a527939993cb6 2024-11-21T00:27:48,339 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/norep/bc494d9d284b40ef8d4a527939993cb6, entries=1, sequenceid=12, filesize=5.0 K 2024-11-21T00:27:48,340 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~342 B/342, heapSize ~1.95 KB/1992, currentSize=0 B/0 for fa18a0bdbdf880445723239afb964dd9 in 115ms, sequenceid=12, compaction requested=false 2024-11-21T00:27:48,344 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/default/test/fa18a0bdbdf880445723239afb964dd9/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-11-21T00:27:48,344 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:27:48,344 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:27:48,345 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:48,345 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for fa18a0bdbdf880445723239afb964dd9: Waiting for close lock at 1732148868225Running coprocessor pre-close hooks at 1732148868225Disabling compacts and flushes for region at 1732148868225Disabling writes for close at 1732148868225Obtaining lock to block concurrent updates at 1732148868225Preparing flush snapshotting stores in fa18a0bdbdf880445723239afb964dd9 at 1732148868225Finished memstore snapshotting test,,1732148843191.fa18a0bdbdf880445723239afb964dd9., syncing WAL and waiting on mvcc, flushsize=dataSize=342, getHeapSize=1992, getOffHeapSize=0, getCellsCount=12 at 1732148868226 (+1 ms)Flushing stores of test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 
at 1732148868226Flushing fa18a0bdbdf880445723239afb964dd9/f: creating writer at 1732148868226Flushing fa18a0bdbdf880445723239afb964dd9/f: appending metadata at 1732148868245 (+19 ms)Flushing fa18a0bdbdf880445723239afb964dd9/f: closing flushed file at 1732148868245Flushing fa18a0bdbdf880445723239afb964dd9/f1: creating writer at 1732148868259 (+14 ms)Flushing fa18a0bdbdf880445723239afb964dd9/f1: appending metadata at 1732148868276 (+17 ms)Flushing fa18a0bdbdf880445723239afb964dd9/f1: closing flushed file at 1732148868276Flushing fa18a0bdbdf880445723239afb964dd9/norep: creating writer at 1732148868296 (+20 ms)Flushing fa18a0bdbdf880445723239afb964dd9/norep: appending metadata at 1732148868313 (+17 ms)Flushing fa18a0bdbdf880445723239afb964dd9/norep: closing flushed file at 1732148868314 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@785291ec: reopening flushed file at 1732148868325 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55965c4: reopening flushed file at 1732148868330 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d3430a4: reopening flushed file at 1732148868335 (+5 ms)Finished flush of dataSize ~342 B/342, heapSize ~1.95 KB/1992, currentSize=0 B/0 for fa18a0bdbdf880445723239afb964dd9 in 115ms, sequenceid=12, compaction requested=false at 1732148868340 (+5 ms)Writing region close event to WAL at 1732148868341 (+1 ms)Running coprocessor post-close hooks at 1732148868344 (+3 ms)Closed at 1732148868345 (+1 ms) 2024-11-21T00:27:48,345 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148843191.fa18a0bdbdf880445723239afb964dd9. 2024-11-21T00:27:48,345 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7dc046db6be22eef18a273b77c92911e, disabling compactions & flushes 2024-11-21T00:27:48,345 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:48,345 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 2024-11-21T00:27:48,345 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. after waiting 0 ms 2024-11-21T00:27:48,345 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 
2024-11-21T00:27:48,345 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7dc046db6be22eef18a273b77c92911e 3/3 column families, dataSize=1.46 KB heapSize=2.94 KB 2024-11-21T00:27:48,369 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e/.tmp/queue/fb463c76cf2345cf85a003f7a9480cc8 is 154, key is 1-5ed4808ef0e6,44369,1732148833096/queue:5ed4808ef0e6%2C44369%2C1732148833096/1732148865075/Put/seqid=0 2024-11-21T00:27:48,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741844_1020 (size=5353) 2024-11-21T00:27:48,382 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.46 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e/.tmp/queue/fb463c76cf2345cf85a003f7a9480cc8 2024-11-21T00:27:48,387 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e/.tmp/queue/fb463c76cf2345cf85a003f7a9480cc8 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e/queue/fb463c76cf2345cf85a003f7a9480cc8 2024-11-21T00:27:48,393 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e/queue/fb463c76cf2345cf85a003f7a9480cc8, entries=1, sequenceid=14, filesize=5.2 K 2024-11-21T00:27:48,394 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.46 KB/1492, heapSize ~2.42 KB/2480, currentSize=0 B/0 for 7dc046db6be22eef18a273b77c92911e in 49ms, sequenceid=14, compaction requested=false 2024-11-21T00:27:48,409 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/replication/7dc046db6be22eef18a273b77c92911e/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1 2024-11-21T00:27:48,413 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:27:48,413 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:27:48,413 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 
2024-11-21T00:27:48,413 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7dc046db6be22eef18a273b77c92911e: Waiting for close lock at 1732148868345Running coprocessor pre-close hooks at 1732148868345Disabling compacts and flushes for region at 1732148868345Disabling writes for close at 1732148868345Obtaining lock to block concurrent updates at 1732148868345Preparing flush snapshotting stores in 7dc046db6be22eef18a273b77c92911e at 1732148868345Finished memstore snapshotting hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e., syncing WAL and waiting on mvcc, flushsize=dataSize=1492, getHeapSize=2960, getOffHeapSize=0, getCellsCount=10 at 1732148868345Flushing stores of hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. at 1732148868346 (+1 ms)Flushing 7dc046db6be22eef18a273b77c92911e/queue: creating writer at 1732148868346Flushing 7dc046db6be22eef18a273b77c92911e/queue: appending metadata at 1732148868368 (+22 ms)Flushing 7dc046db6be22eef18a273b77c92911e/queue: closing flushed file at 1732148868368Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37d0a2cc: reopening flushed file at 1732148868386 (+18 ms)Finished flush of dataSize ~1.46 KB/1492, heapSize ~2.42 KB/2480, currentSize=0 B/0 for 7dc046db6be22eef18a273b77c92911e in 49ms, sequenceid=14, compaction requested=false at 1732148868394 (+8 ms)Writing region close event to WAL at 1732148868396 (+2 ms)Running coprocessor post-close hooks at 1732148868412 (+16 ms)Closed at 1732148868413 (+1 ms) 2024-11-21T00:27:48,413 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148849711.7dc046db6be22eef18a273b77c92911e. 
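[Editor's note] The "Region close journal" entries above record the close of fa18a0bdbdf880445723239afb964dd9 and 7dc046db6be22eef18a273b77c92911e as a run of "<phase> at <epoch millis>" fragments, with "(+N ms)" giving the gap to the previous phase. To see where the reported 115 ms and 49 ms went, those fragments can be split back apart; a rough sketch of that idea (class and variable names are mine), fed with a short consecutive excerpt of the queue-store journal:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournal {
        public static void main(String[] args) {
            // Consecutive excerpt of the 7dc046db6be22eef18a273b77c92911e close journal above.
            String journal =
                  "Flushing 7dc046db6be22eef18a273b77c92911e/queue: creating writer at 1732148868346"
                + "Flushing 7dc046db6be22eef18a273b77c92911e/queue: appending metadata at 1732148868368 (+22 ms)"
                + "Flushing 7dc046db6be22eef18a273b77c92911e/queue: closing flushed file at 1732148868368"
                + "Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37d0a2cc: reopening flushed file at 1732148868386 (+18 ms)";
            // Each fragment is "<phase> at <13-digit epoch millis>", optionally followed by "(+N ms)".
            Matcher m = Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+\\d+ ms\\))?").matcher(journal);
            long prev = -1;
            while (m.find()) {
                long ts = Long.parseLong(m.group(2));
                System.out.printf("%-80s +%d ms%n", m.group(1), prev < 0 ? 0L : ts - prev);
                prev = ts;
            }
        }
    }

For the excerpt above this prints gaps of 0, 22, 0 and 18 ms, matching the "(+N ms)" deltas the journal itself reports.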
2024-11-21T00:27:48,432 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:27:48,483 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:48,632 DEBUG [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:27:48,654 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/info/2171a69fc60d4e1284b5f0e9e71356e4 2024-11-21T00:27:48,673 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/ns/00b20e2075bc4e4d9ca1e6c040e0df5f is 43, key is default/ns:d/1732148835876/Put/seqid=0 2024-11-21T00:27:48,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741845_1021 (size=5153) 2024-11-21T00:27:48,677 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/ns/00b20e2075bc4e4d9ca1e6c040e0df5f 2024-11-21T00:27:48,704 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/rep_barrier/165e6fc09f974018bfe53dae710753a3 is 112, key is test,,1732148843191.fa18a0bdbdf880445723239afb964dd9./rep_barrier:seqnumDuringOpen/1732148844422/Put/seqid=0 2024-11-21T00:27:48,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741846_1022 (size=5518) 2024-11-21T00:27:48,708 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/rep_barrier/165e6fc09f974018bfe53dae710753a3 2024-11-21T00:27:48,727 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/table/e1a1bc29446f4e7b9087ca01821a7856 is 53, key is hbase:replication/table:state/1732148850359/Put/seqid=0 2024-11-21T00:27:48,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741847_1023 (size=5308) 
2024-11-21T00:27:48,734 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/table/e1a1bc29446f4e7b9087ca01821a7856 2024-11-21T00:27:48,738 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/info/2171a69fc60d4e1284b5f0e9e71356e4 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/info/2171a69fc60d4e1284b5f0e9e71356e4 2024-11-21T00:27:48,742 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/info/2171a69fc60d4e1284b5f0e9e71356e4, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:27:48,743 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/ns/00b20e2075bc4e4d9ca1e6c040e0df5f as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/ns/00b20e2075bc4e4d9ca1e6c040e0df5f 2024-11-21T00:27:48,748 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/ns/00b20e2075bc4e4d9ca1e6c040e0df5f, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:27:48,749 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/rep_barrier/165e6fc09f974018bfe53dae710753a3 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/rep_barrier/165e6fc09f974018bfe53dae710753a3 2024-11-21T00:27:48,753 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/rep_barrier/165e6fc09f974018bfe53dae710753a3, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:27:48,761 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/.tmp/table/e1a1bc29446f4e7b9087ca01821a7856 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/table/e1a1bc29446f4e7b9087ca01821a7856 2024-11-21T00:27:48,775 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/table/e1a1bc29446f4e7b9087ca01821a7856, 
entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:27:48,776 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 547ms, sequenceid=16, compaction requested=false 2024-11-21T00:27:48,781 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:27:48,782 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:27:48,782 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:27:48,782 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:48,782 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148868228Running coprocessor pre-close hooks at 1732148868228Disabling compacts and flushes for region at 1732148868229 (+1 ms)Disabling writes for close at 1732148868229Obtaining lock to block concurrent updates at 1732148868229Preparing flush snapshotting stores in 1588230740 at 1732148868229Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148868229Flushing stores of hbase:meta,,1.1588230740 at 1732148868230 (+1 ms)Flushing 1588230740/info: creating writer at 1732148868230Flushing 1588230740/info: appending metadata at 1732148868247 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732148868247Flushing 1588230740/ns: creating writer at 1732148868658 (+411 ms)Flushing 1588230740/ns: appending metadata at 1732148868673 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732148868673Flushing 1588230740/rep_barrier: creating writer at 1732148868685 (+12 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148868703 (+18 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148868703Flushing 1588230740/table: creating writer at 1732148868712 (+9 ms)Flushing 1588230740/table: appending metadata at 1732148868726 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732148868726Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f792281: reopening flushed file at 1732148868738 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76d5b21c: reopening flushed file at 1732148868742 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b928c39: reopening flushed file at 1732148868749 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16bde685: reopening flushed file at 1732148868753 (+4 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 547ms, sequenceid=16, compaction requested=false at 1732148868776 (+23 ms)Writing region close event to WAL at 1732148868779 (+3 ms)Running coprocessor 
post-close hooks at 1732148868782 (+3 ms)Closed at 1732148868782 2024-11-21T00:27:48,782 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:48,784 WARN [BootstrapNodeManager {}] regionserver.BootstrapNodeManager(142): failed to get live region servers from master org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:41951 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.BootstrapNodeManager.getFromMaster(BootstrapNodeManager.java:140) ~[classes/:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$Stub.getLiveRegionServers(RegionServerStatusProtos.java:17191) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClusterConnectionImpl.getLiveRegionServers(AsyncClusterConnectionImpl.java:139) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.BootstrapNodeManager.getFromMaster(BootstrapNodeManager.java:140) ~[classes/:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
10 more 2024-11-21T00:27:48,790 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 1466, reset compression=false 2024-11-21T00:27:48,794 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:48,794 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.wal-reader.5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165 to pos 2357, reset compression=false 2024-11-21T00:27:48,794 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,44369,1732148833096 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.1732148835165, lastWalPosition=2357, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:48,795 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,44369,1732148833096: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,44369,1732148833096, walGroup=5ed4808ef0e6%2C44369%2C1732148833096, offset=5ed4808ef0e6%2C44369%2C1732148833096.1732148835165:2357, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:44369 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:27:48,796 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:27:48,796 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:27:48,796 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:27:48,797 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:27:48,797 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:27:48,797 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": 
"sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1319108608, "init": 1048576000, "max": 2306867200, "used": 838751272 }, "NonHeapMemoryUsage": { "committed": 203161600, "init": 7667712, "max": -1, "used": 200096160 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:27:48,797 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:41951 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:41951 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-21T00:27:48,832 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,44369,1732148833096; all regions closed. 
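[Editor's note] Both failures above bottom out in the same root cause: by this point in the shutdown the region server's RPC client has already been stopped (see the earlier "Stopping rpc client" entries), so the shipper's attempt to persist its WAL offset through TableReplicationQueueStorage.setOffset, and the follow-up reportRSFatalError to the master, can only end in StoppedRpcClientException, which is what triggers the abort. When reading wrapped traces like these, walking the cause chain is the quickest way to the root; a minimal, generic sketch (nothing HBase-specific, and the exception chain below is a stand-in, not the real classes):

    public class RootCause {
        /** Follows Throwable.getCause() to the innermost exception, guarding against cycles. */
        static Throwable rootCause(Throwable t) {
            Throwable cur = t;
            for (int i = 0; cur.getCause() != null && cur.getCause() != cur && i < 100; i++) {
                cur = cur.getCause();
            }
            return cur;
        }

        public static void main(String[] args) {
            // Stand-in for the wrapped failure in the log, since the HBase exception
            // classes are not assumed to be on the classpath here.
            Throwable wrapped = new RuntimeException("failed to setOffset",
                new RuntimeException("Call failed on local exception",
                    new IllegalStateException("rpc client already stopped")));
            System.out.println("root cause: " + rootCause(wrapped));
        }
    }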
2024-11-21T00:27:48,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:27:48,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741839_1015 (size=3552) 2024-11-21T00:27:48,841 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/WALs/5ed4808ef0e6,44369,1732148833096/5ed4808ef0e6%2C44369%2C1732148833096.rep.1732148850183 not finished, retry = 0 2024-11-21T00:27:48,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741832_1008 (size=2365) 2024-11-21T00:27:48,956 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:48,956 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:27:48,957 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:27:48,957 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:27:48,957 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:27:48,957 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:27:48,957 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,44369,1732148833096 because: Region server is closing 2024-11-21T00:27:48,958 INFO [RS:0;5ed4808ef0e6:44369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:44369. 
2024-11-21T00:27:48,958 DEBUG [RS:0;5ed4808ef0e6:44369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:48,958 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:48,958 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:48,958 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:49,058 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,44369,1732148833096.replicationSource.shipper5ed4808ef0e6%2C44369%2C1732148833096,1-5ed4808ef0e6,44369,1732148833096 terminated 2024-11-21T00:27:49,059 INFO [RS:0;5ed4808ef0e6:44369 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:44369. 
2024-11-21T00:27:49,059 DEBUG [RS:0;5ed4808ef0e6:44369 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:49,059 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:49,059 DEBUG [RS:0;5ed4808ef0e6:44369 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:49,059 INFO [RS:0;5ed4808ef0e6:44369 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44369 2024-11-21T00:27:49,059 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:27:49,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572/rs 2024-11-21T00:27:49,085 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-2137408572/rs/5ed4808ef0e6,44369,1732148833096 2024-11-21T00:27:49,085 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:27:49,096 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,44369,1732148833096] 2024-11-21T00:27:49,156 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-2137408572/draining/5ed4808ef0e6,44369,1732148833096 already deleted, retry=false 2024-11-21T00:27:49,156 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,44369,1732148833096 expired; onlineServers=0 2024-11-21T00:27:49,156 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,41951,1732148832855' ***** 2024-11-21T00:27:49,156 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:27:49,156 INFO [M:0;5ed4808ef0e6:41951 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:27:49,156 INFO [M:0;5ed4808ef0e6:41951 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:27:49,156 DEBUG [M:0;5ed4808ef0e6:41951 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:27:49,156 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-21T00:27:49,156 DEBUG [M:0;5ed4808ef0e6:41951 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:27:49,156 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148834868 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148834868,5,FailOnTimeoutGroup] 2024-11-21T00:27:49,156 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148834857 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148834857,5,FailOnTimeoutGroup] 2024-11-21T00:27:49,156 INFO [M:0;5ed4808ef0e6:41951 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:27:49,156 INFO [M:0;5ed4808ef0e6:41951 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:27:49,157 DEBUG [M:0;5ed4808ef0e6:41951 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:27:49,157 INFO [M:0;5ed4808ef0e6:41951 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:27:49,157 INFO [M:0;5ed4808ef0e6:41951 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:27:49,157 INFO [M:0;5ed4808ef0e6:41951 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:27:49,157 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-21T00:27:49,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-2137408572/master 2024-11-21T00:27:49,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-2137408572 2024-11-21T00:27:49,177 DEBUG [M:0;5ed4808ef0e6:41951 {}] zookeeper.ZKUtil(347): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Unable to get data of znode /1-2137408572/master because node does not exist (not an error) 2024-11-21T00:27:49,177 WARN [M:0;5ed4808ef0e6:41951 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:27:49,178 INFO [M:0;5ed4808ef0e6:41951 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/.lastflushedseqids 2024-11-21T00:27:49,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741848_1024 (size=245) 2024-11-21T00:27:49,182 INFO [M:0;5ed4808ef0e6:41951 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:27:49,182 INFO [M:0;5ed4808ef0e6:41951 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:27:49,182 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:27:49,182 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:49,182 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:49,182 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:27:49,182 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:49,182 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.09 KB heapSize=64.93 KB 2024-11-21T00:27:49,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:49,196 INFO [RS:0;5ed4808ef0e6:44369 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:27:49,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44369-0x1015ac95b3a0004, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:49,196 INFO [RS:0;5ed4808ef0e6:44369 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,44369,1732148833096; zookeeper connection closed. 
2024-11-21T00:27:49,196 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7911b250 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7911b250 2024-11-21T00:27:49,196 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:27:49,200 DEBUG [M:0;5ed4808ef0e6:41951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1d042804050e4b18b56cdd9eb8d23032 is 82, key is hbase:meta,,1/info:regioninfo/1732148835853/Put/seqid=0 2024-11-21T00:27:49,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741849_1025 (size=5672) 2024-11-21T00:27:49,287 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:49,605 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1d042804050e4b18b56cdd9eb8d23032 2024-11-21T00:27:49,626 DEBUG [M:0;5ed4808ef0e6:41951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e8b7c911713f43788a0bd9125fe006cd is 1247, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732148844431/Put/seqid=0 2024-11-21T00:27:49,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741850_1026 (size=7219) 2024-11-21T00:27:49,640 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=54.54 KB at sequenceid=103 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e8b7c911713f43788a0bd9125fe006cd 2024-11-21T00:27:49,646 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e8b7c911713f43788a0bd9125fe006cd 2024-11-21T00:27:49,667 DEBUG [M:0;5ed4808ef0e6:41951 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ef12230fdce34bf08b2b705937eeb7fc is 69, key is 5ed4808ef0e6,44369,1732148833096/rs:state/1732148834881/Put/seqid=0 2024-11-21T00:27:49,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741851_1027 (size=5156) 2024-11-21T00:27:49,682 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at 
sequenceid=103 (bloomFilter=true), to=hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ef12230fdce34bf08b2b705937eeb7fc 2024-11-21T00:27:49,692 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1d042804050e4b18b56cdd9eb8d23032 as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1d042804050e4b18b56cdd9eb8d23032 2024-11-21T00:27:49,701 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1d042804050e4b18b56cdd9eb8d23032, entries=8, sequenceid=103, filesize=5.5 K 2024-11-21T00:27:49,702 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e8b7c911713f43788a0bd9125fe006cd as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e8b7c911713f43788a0bd9125fe006cd 2024-11-21T00:27:49,715 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e8b7c911713f43788a0bd9125fe006cd 2024-11-21T00:27:49,715 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e8b7c911713f43788a0bd9125fe006cd, entries=11, sequenceid=103, filesize=7.0 K 2024-11-21T00:27:49,723 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ef12230fdce34bf08b2b705937eeb7fc as hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ef12230fdce34bf08b2b705937eeb7fc 2024-11-21T00:27:49,727 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46065/user/jenkins/test-data/69a7467d-992e-60a7-6b3c-11556f4ac1ea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ef12230fdce34bf08b2b705937eeb7fc, entries=1, sequenceid=103, filesize=5.0 K 2024-11-21T00:27:49,728 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.09 KB/56417, heapSize ~64.63 KB/66184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 546ms, sequenceid=103, compaction requested=false 2024-11-21T00:27:49,748 INFO [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:27:49,748 DEBUG [M:0;5ed4808ef0e6:41951 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148869182Disabling compacts and flushes for region at 1732148869182Disabling writes for close at 1732148869182Obtaining lock to block concurrent updates at 1732148869182Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148869182Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=56417, getHeapSize=66424, getOffHeapSize=0, getCellsCount=120 at 1732148869182Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148869183 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148869183Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148869199 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148869199Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148869609 (+410 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148869626 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148869626Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148869646 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148869666 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148869666Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fd684b8: reopening flushed file at 1732148869690 (+24 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39803fe8: reopening flushed file at 1732148869701 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c07a510: reopening flushed file at 1732148869715 (+14 ms)Finished flush of dataSize ~55.09 KB/56417, heapSize ~64.63 KB/66184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 546ms, sequenceid=103, compaction requested=false at 1732148869728 (+13 ms)Writing region close event to WAL at 1732148869748 (+20 ms)Closed at 1732148869748 2024-11-21T00:27:49,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37973 is added to blk_1073741830_1006 (size=63620) 2024-11-21T00:27:49,759 INFO [M:0;5ed4808ef0e6:41951 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:27:49,760 INFO [M:0;5ed4808ef0e6:41951 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41951 2024-11-21T00:27:49,760 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:27:49,760 INFO [M:0;5ed4808ef0e6:41951 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:27:49,806 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-21T00:27:49,806 INFO [master/5ed4808ef0e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-21T00:27:49,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:49,985 INFO [M:0;5ed4808ef0e6:41951 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:27:49,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41951-0x1015ac95b3a0003, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:49,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1028ab63{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:27:49,991 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6bfba3eb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:27:49,991 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:27:49,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4074e033{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:27:49,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68e9eb4a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.log.dir/,STOPPED} 2024-11-21T00:27:49,994 WARN [BP-2008586648-172.17.0.2-1732148830009 heartbeating to localhost/127.0.0.1:46065 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:27:49,994 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:27:49,994 WARN [BP-2008586648-172.17.0.2-1732148830009 heartbeating to localhost/127.0.0.1:46065 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2008586648-172.17.0.2-1732148830009 (Datanode Uuid 45c5d1b8-a541-4b1e-9df1-9bb0a8fd5235) service to localhost/127.0.0.1:46065 2024-11-21T00:27:49,994 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:27:49,994 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/cluster_e7ac7e7e-01db-540e-7a4f-79aaa6d6cbb7/data/data1/current/BP-2008586648-172.17.0.2-1732148830009 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:27:49,995 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/cluster_e7ac7e7e-01db-540e-7a4f-79aaa6d6cbb7/data/data2/current/BP-2008586648-172.17.0.2-1732148830009 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:27:49,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:27:50,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@391811d5{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:27:50,005 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b7fb873{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:27:50,005 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:27:50,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a51b703{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:27:50,005 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44bf1bb8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/b9ba2b2b-8d81-2e71-8352-63dd1dc36acc/hadoop.log.dir/,STOPPED} 2024-11-21T00:27:50,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:27:50,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:27:50,028 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:27:50,028 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:288) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:50,029 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:50,029 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:50,029 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:27:50,029 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:27:50,029 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1418206146, stopped=false 2024-11-21T00:27:50,029 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,42819,1732148825926 2024-11-21T00:27:50,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01148209107/running 2024-11-21T00:27:50,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01148209107/running 2024-11-21T00:27:50,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:50,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:50,043 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:27:50,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on znode that does not yet exist, /01148209107/running 2024-11-21T00:27:50,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Set watcher on znode that does not yet exist, /01148209107/running 2024-11-21T00:27:50,045 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:27:50,045 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication2(TestMasterReplication.java:288) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:50,045 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:50,045 INFO [Time-limited test {}] regionserver.HRegionServer(2196): 
***** STOPPING region server '5ed4808ef0e6,33853,1732148826420' ***** 2024-11-21T00:27:50,045 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:27:50,045 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:27:50,045 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(3091): Received CLOSE for 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(3091): Received CLOSE for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33853. 2024-11-21T00:27:50,046 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 69eb84528d7bfcc8bfbb1997d42d5d19, disabling compactions & flushes 2024-11-21T00:27:50,046 DEBUG [RS:0;5ed4808ef0e6:33853 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:50,046 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:50,046 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 
2024-11-21T00:27:50,046 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:27:50,046 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. after waiting 0 ms 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:27:50,046 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:27:50,046 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:27:50,046 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 69eb84528d7bfcc8bfbb1997d42d5d19 3/3 column families, dataSize=341 B heapSize=1.99 KB 2024-11-21T00:27:50,047 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:27:50,047 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 69eb84528d7bfcc8bfbb1997d42d5d19=test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19., 0878e017eb460e018013f072af40b5c7=hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7.} 2024-11-21T00:27:50,047 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1351): Waiting on 0878e017eb460e018013f072af40b5c7, 1588230740, 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:50,047 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:50,047 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:50,047 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:27:50,047 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:50,047 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:50,047 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:27:50,063 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/info/a0b06c481f78441dbcefd5f14809de48 is 147, key is 
hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7./info:regioninfo/1732148848477/Put/seqid=0 2024-11-21T00:27:50,065 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/f/1d856e7b4ee44e579b1c25aa26127f4c is 37, key is row3/f:row3/1732148863873/Put/seqid=0 2024-11-21T00:27:50,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741840_1016 (size=7686) 2024-11-21T00:27:50,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741841_1017 (size=5228) 2024-11-21T00:27:50,086 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:27:50,142 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:27:50,195 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1491, reset compression=false 2024-11-21T00:27:50,202 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:50,202 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1738, reset compression=false 2024-11-21T00:27:50,203 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,33853,1732148826420 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869, lastWalPosition=1738, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:27:50,204 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,33853,1732148826420: Failed to operate on 
replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,33853,1732148826420, walGroup=5ed4808ef0e6%2C33853%2C1732148826420, offset=5ed4808ef0e6%2C33853%2C1732148826420.1732148828869:1738, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:33853 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:27:50,205 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:27:50,205 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:27:50,205 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:27:50,205 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:27:50,205 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:27:50,205 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": 
"sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1319108608, "init": 1048576000, "max": 2306867200, "used": 870208552 }, "NonHeapMemoryUsage": { "committed": 203292672, "init": 7667712, "max": -1, "used": 200315136 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:27:50,206 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:42819 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:42819 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-21T00:27:50,247 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1351): Waiting on 0878e017eb460e018013f072af40b5c7, 1588230740, 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:50,421 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1738, reset compression=false 2024-11-21T00:27:50,447 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1351): Waiting on 0878e017eb460e018013f072af40b5c7, 1588230740, 69eb84528d7bfcc8bfbb1997d42d5d19 2024-11-21T00:27:50,475 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=236 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/f/1d856e7b4ee44e579b1c25aa26127f4c 2024-11-21T00:27:50,475 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/info/a0b06c481f78441dbcefd5f14809de48 2024-11-21T00:27:50,479 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1d856e7b4ee44e579b1c25aa26127f4c 2024-11-21T00:27:50,496 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/f1/9548edecf3924ad5b27460e13916b977 is 30, key is row1/f1:/1732148861190/DeleteFamily/seqid=0 2024-11-21T00:27:50,498 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/ns/5b4ef377fd064259b8c8aa385712d988 is 43, key is default/ns:d/1732148829780/Put/seqid=0 2024-11-21T00:27:50,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741842_1018 (size=5158) 2024-11-21T00:27:50,501 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=77 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/f1/9548edecf3924ad5b27460e13916b977 2024-11-21T00:27:50,505 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9548edecf3924ad5b27460e13916b977 2024-11-21T00:27:50,522 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/norep/251f9ae04c654ef68ede35e73d81ee80 is 32, key is row/norep:/1732148860183/DeleteFamily/seqid=0 2024-11-21T00:27:50,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741843_1019 (size=5153) 2024-11-21T00:27:50,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741844_1020 (size=5101) 2024-11-21T00:27:50,539 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28 B at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/norep/251f9ae04c654ef68ede35e73d81ee80 2024-11-21T00:27:50,543 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 251f9ae04c654ef68ede35e73d81ee80 2024-11-21T00:27:50,544 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/f/1d856e7b4ee44e579b1c25aa26127f4c as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/f/1d856e7b4ee44e579b1c25aa26127f4c 2024-11-21T00:27:50,548 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1d856e7b4ee44e579b1c25aa26127f4c 2024-11-21T00:27:50,548 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/f/1d856e7b4ee44e579b1c25aa26127f4c, entries=5, sequenceid=12, filesize=5.1 K 2024-11-21T00:27:50,549 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/f1/9548edecf3924ad5b27460e13916b977 as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/f1/9548edecf3924ad5b27460e13916b977 2024-11-21T00:27:50,554 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9548edecf3924ad5b27460e13916b977 2024-11-21T00:27:50,554 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/f1/9548edecf3924ad5b27460e13916b977, entries=3, sequenceid=12, filesize=5.0 K 2024-11-21T00:27:50,555 DEBUG 
[RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/.tmp/norep/251f9ae04c654ef68ede35e73d81ee80 as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/norep/251f9ae04c654ef68ede35e73d81ee80 2024-11-21T00:27:50,559 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 251f9ae04c654ef68ede35e73d81ee80 2024-11-21T00:27:50,559 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/norep/251f9ae04c654ef68ede35e73d81ee80, entries=1, sequenceid=12, filesize=5.0 K 2024-11-21T00:27:50,560 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~341 B/341, heapSize ~1.95 KB/1992, currentSize=0 B/0 for 69eb84528d7bfcc8bfbb1997d42d5d19 in 514ms, sequenceid=12, compaction requested=false 2024-11-21T00:27:50,564 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/default/test/69eb84528d7bfcc8bfbb1997d42d5d19/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-11-21T00:27:50,564 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:27:50,564 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:27:50,564 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 69eb84528d7bfcc8bfbb1997d42d5d19: Waiting for close lock at 1732148870046Running coprocessor pre-close hooks at 1732148870046Disabling compacts and flushes for region at 1732148870046Disabling writes for close at 1732148870046Obtaining lock to block concurrent updates at 1732148870046Preparing flush snapshotting stores in 69eb84528d7bfcc8bfbb1997d42d5d19 at 1732148870046Finished memstore snapshotting test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19., syncing WAL and waiting on mvcc, flushsize=dataSize=341, getHeapSize=1992, getOffHeapSize=0, getCellsCount=12 at 1732148870047 (+1 ms)Flushing stores of test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 
at 1732148870047Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/f: creating writer at 1732148870048 (+1 ms)Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/f: appending metadata at 1732148870064 (+16 ms)Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/f: closing flushed file at 1732148870064Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/f1: creating writer at 1732148870479 (+415 ms)Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/f1: appending metadata at 1732148870495 (+16 ms)Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/f1: closing flushed file at 1732148870495Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/norep: creating writer at 1732148870506 (+11 ms)Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/norep: appending metadata at 1732148870521 (+15 ms)Flushing 69eb84528d7bfcc8bfbb1997d42d5d19/norep: closing flushed file at 1732148870521Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34710da4: reopening flushed file at 1732148870543 (+22 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@377f7789: reopening flushed file at 1732148870548 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c9fdf62: reopening flushed file at 1732148870554 (+6 ms)Finished flush of dataSize ~341 B/341, heapSize ~1.95 KB/1992, currentSize=0 B/0 for 69eb84528d7bfcc8bfbb1997d42d5d19 in 514ms, sequenceid=12, compaction requested=false at 1732148870560 (+6 ms)Writing region close event to WAL at 1732148870561 (+1 ms)Running coprocessor post-close hooks at 1732148870564 (+3 ms)Closed at 1732148870564 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19. 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0878e017eb460e018013f072af40b5c7, disabling compactions & flushes 2024-11-21T00:27:50,565 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. after waiting 0 ms 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0878e017eb460e018013f072af40b5c7: Waiting for close lock at 1732148870565Running coprocessor pre-close hooks at 1732148870565Disabling compacts and flushes for region at 1732148870565Disabling writes for close at 1732148870565Failed flush hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7., putting online again at 1732148870565 2024-11-21T00:27:50,565 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2435): Abort already in progress. 
Ignoring the current request with reason: Unrecoverable exception while closing hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:50,647 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(3091): Received CLOSE for 0878e017eb460e018013f072af40b5c7 2024-11-21T00:27:50,647 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1351): Waiting on 0878e017eb460e018013f072af40b5c7, 1588230740 2024-11-21T00:27:50,648 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0878e017eb460e018013f072af40b5c7, disabling compactions & flushes 2024-11-21T00:27:50,648 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:50,648 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:50,648 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. after waiting 0 ms 2024-11-21T00:27:50,648 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 2024-11-21T00:27:50,648 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0878e017eb460e018013f072af40b5c7: Waiting for close lock at 1732148870647Running coprocessor pre-close hooks at 1732148870647Disabling compacts and flushes for region at 1732148870647Disabling writes for close at 1732148870648 (+1 ms)Failed flush hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7., putting online again at 1732148870648 2024-11-21T00:27:50,648 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2435): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7. 
2024-11-21T00:27:50,709 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:27:50,709 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:27:50,728 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 1738, reset compression=false 2024-11-21T00:27:50,740 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:27:50,740 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 2382, reset compression=false 2024-11-21T00:27:50,848 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:27:50,938 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/ns/5b4ef377fd064259b8c8aa385712d988 2024-11-21T00:27:50,944 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.wal-reader.5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/WALs/5ed4808ef0e6,33853,1732148826420/5ed4808ef0e6%2C33853%2C1732148826420.1732148828869 to pos 2382, reset compression=false 2024-11-21T00:27:50,981 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/rep_barrier/3dc9f1f941464ef5ad9ecd9b51237096 is 112, key is test,,1732148842046.69eb84528d7bfcc8bfbb1997d42d5d19./rep_barrier:seqnumDuringOpen/1732148842806/Put/seqid=0 2024-11-21T00:27:50,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741845_1021 (size=5518) 2024-11-21T00:27:50,985 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/rep_barrier/3dc9f1f941464ef5ad9ecd9b51237096 
2024-11-21T00:27:51,011 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/table/1c5a849ff23848c493197e0de8d1595a is 53, key is hbase:replication/table:state/1732148848519/Put/seqid=0 2024-11-21T00:27:51,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741846_1022 (size=5308) 2024-11-21T00:27:51,015 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/table/1c5a849ff23848c493197e0de8d1595a 2024-11-21T00:27:51,021 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/info/a0b06c481f78441dbcefd5f14809de48 as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/info/a0b06c481f78441dbcefd5f14809de48 2024-11-21T00:27:51,026 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/info/a0b06c481f78441dbcefd5f14809de48, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:27:51,026 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/ns/5b4ef377fd064259b8c8aa385712d988 as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/ns/5b4ef377fd064259b8c8aa385712d988 2024-11-21T00:27:51,036 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/ns/5b4ef377fd064259b8c8aa385712d988, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:27:51,041 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/rep_barrier/3dc9f1f941464ef5ad9ecd9b51237096 as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/rep_barrier/3dc9f1f941464ef5ad9ecd9b51237096 2024-11-21T00:27:51,048 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:27:51,048 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0878e017eb460e018013f072af40b5c7=hbase:replication,,1732148847530.0878e017eb460e018013f072af40b5c7.} 2024-11-21T00:27:51,048 DEBUG [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:27:51,057 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/rep_barrier/3dc9f1f941464ef5ad9ecd9b51237096, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:27:51,058 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/.tmp/table/1c5a849ff23848c493197e0de8d1595a as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/table/1c5a849ff23848c493197e0de8d1595a 2024-11-21T00:27:51,063 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/table/1c5a849ff23848c493197e0de8d1595a, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:27:51,064 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1017ms, sequenceid=16, compaction requested=false 2024-11-21T00:27:51,064 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:27:51,074 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:27:51,075 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:27:51,075 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:27:51,075 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:51,075 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148870047Running coprocessor pre-close hooks at 1732148870047Disabling compacts and flushes for region at 1732148870047Disabling writes for close at 1732148870047Obtaining lock to block concurrent updates at 1732148870047Preparing flush snapshotting stores in 1588230740 at 1732148870047Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148870047Flushing stores of hbase:meta,,1.1588230740 at 1732148870048 (+1 ms)Flushing 1588230740/info: creating writer at 1732148870048Flushing 1588230740/info: appending metadata at 1732148870063 (+15 ms)Flushing 1588230740/info: closing flushed file at 1732148870063Flushing 1588230740/ns: creating writer at 1732148870479 (+416 ms)Flushing 1588230740/ns: appending metadata at 1732148870498 (+19 ms)Flushing 
1588230740/ns: closing flushed file at 1732148870498Flushing 1588230740/rep_barrier: creating writer at 1732148870955 (+457 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148870981 (+26 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148870981Flushing 1588230740/table: creating writer at 1732148870989 (+8 ms)Flushing 1588230740/table: appending metadata at 1732148871011 (+22 ms)Flushing 1588230740/table: closing flushed file at 1732148871011Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78337281: reopening flushed file at 1732148871021 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dc53385: reopening flushed file at 1732148871026 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c91e161: reopening flushed file at 1732148871036 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2107efc6: reopening flushed file at 1732148871058 (+22 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1017ms, sequenceid=16, compaction requested=false at 1732148871064 (+6 ms)Writing region close event to WAL at 1732148871068 (+4 ms)Running coprocessor post-close hooks at 1732148871075 (+7 ms)Closed at 1732148871075 2024-11-21T00:27:51,075 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:27:51,248 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1346): We were exiting though online regions are not empty, because some regions failed closing 2024-11-21T00:27:51,248 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,33853,1732148826420; all regions closed. 2024-11-21T00:27:51,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:27:51,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741839_1015 (size=2609) 2024-11-21T00:27:51,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741832_1008 (size=2390) 2024-11-21T00:27:51,257 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:51,257 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:27:51,257 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:27:51,257 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:27:51,257 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:27:51,258 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,33853,1732148826420 because: Region server is closing 2024-11-21T00:27:51,258 INFO [RS:0;5ed4808ef0e6:33853 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33853. 
2024-11-21T00:27:51,258 DEBUG [RS:0;5ed4808ef0e6:33853 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:51,258 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:51,258 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:51,258 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:27:51,258 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:27:51,358 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33853,1732148826420.replicationSource.shipper5ed4808ef0e6%2C33853%2C1732148826420,1-5ed4808ef0e6,33853,1732148826420 terminated 2024-11-21T00:27:51,358 INFO [RS:0;5ed4808ef0e6:33853 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:33853. 
2024-11-21T00:27:51,358 DEBUG [RS:0;5ed4808ef0e6:33853 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:27:51,359 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:51,359 DEBUG [RS:0;5ed4808ef0e6:33853 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:51,359 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:27:51,359 INFO [RS:0;5ed4808ef0e6:33853 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33853 2024-11-21T00:27:51,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107/rs 2024-11-21T00:27:51,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01148209107/rs/5ed4808ef0e6,33853,1732148826420 2024-11-21T00:27:51,456 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:27:51,464 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,33853,1732148826420] 2024-11-21T00:27:51,474 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /01148209107/draining/5ed4808ef0e6,33853,1732148826420 already deleted, retry=false 2024-11-21T00:27:51,474 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,33853,1732148826420 expired; onlineServers=0 2024-11-21T00:27:51,474 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,42819,1732148825926' ***** 2024-11-21T00:27:51,474 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:27:51,475 INFO [M:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:27:51,475 INFO [M:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:27:51,475 DEBUG [M:0;5ed4808ef0e6:42819 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:27:51,475 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:27:51,475 DEBUG [M:0;5ed4808ef0e6:42819 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:27:51,475 INFO [M:0;5ed4808ef0e6:42819 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:27:51,475 INFO [M:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:27:51,475 DEBUG [M:0;5ed4808ef0e6:42819 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:27:51,475 INFO [M:0;5ed4808ef0e6:42819 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:27:51,475 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148828531 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148828531,5,FailOnTimeoutGroup] 2024-11-21T00:27:51,475 INFO [M:0;5ed4808ef0e6:42819 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:27:51,475 ERROR [M:0;5ed4808ef0e6:42819 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] 2024-11-21T00:27:51,475 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148828531 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148828531,5,FailOnTimeoutGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:37293,5,PEWorkerGroup] 2024-11-21T00:27:51,475 INFO [M:0;5ed4808ef0e6:42819 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:27:51,476 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-21T00:27:51,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01148209107/master 2024-11-21T00:27:51,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01148209107 2024-11-21T00:27:51,497 DEBUG [M:0;5ed4808ef0e6:42819 {}] zookeeper.RecoverableZooKeeper(212): Node /01148209107/master already deleted, retry=false 2024-11-21T00:27:51,498 DEBUG [M:0;5ed4808ef0e6:42819 {}] master.ActiveMasterManager(353): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Failed delete of our master address node; KeeperErrorCode = NoNode for /01148209107/master 2024-11-21T00:27:51,514 INFO [M:0;5ed4808ef0e6:42819 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/.lastflushedseqids 2024-11-21T00:27:51,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741847_1023 (size=245) 2024-11-21T00:27:51,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:51,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33853-0x1015ac95b3a0001, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:51,565 INFO [RS:0;5ed4808ef0e6:33853 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:27:51,565 INFO [RS:0;5ed4808ef0e6:33853 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,33853,1732148826420; zookeeper connection closed. 2024-11-21T00:27:51,570 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7479ab16 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7479ab16 2024-11-21T00:27:51,570 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:27:51,941 INFO [M:0;5ed4808ef0e6:42819 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:27:51,941 INFO [M:0;5ed4808ef0e6:42819 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:27:51,941 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:27:51,941 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:51,941 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:51,941 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-21T00:27:51,941 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:51,941 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.75 KB heapSize=65.74 KB 2024-11-21T00:27:51,960 DEBUG [M:0;5ed4808ef0e6:42819 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9fd787b1601147e396cac60ce47c1182 is 82, key is hbase:meta,,1/info:regioninfo/1732148829682/Put/seqid=0 2024-11-21T00:27:51,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741848_1024 (size=5672) 2024-11-21T00:27:51,982 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9fd787b1601147e396cac60ce47c1182 2024-11-21T00:27:52,013 DEBUG [M:0;5ed4808ef0e6:42819 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8690ffe5bb544c2a8d22b87fe35db65e is 1246, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732148842822/Put/seqid=0 2024-11-21T00:27:52,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741849_1025 (size=7218) 2024-11-21T00:27:52,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,021 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,065 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,066 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,074 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,581 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.20 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8690ffe5bb544c2a8d22b87fe35db65e 2024-11-21T00:27:52,585 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8690ffe5bb544c2a8d22b87fe35db65e 2024-11-21T00:27:52,600 DEBUG [M:0;5ed4808ef0e6:42819 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d9c6eea224842f4b5683b9dccddff4f is 69, key is 5ed4808ef0e6,33853,1732148826420/rs:state/1732148828579/Put/seqid=0 2024-11-21T00:27:52,602 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:27:52,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741850_1026 (size=5156) 2024-11-21T00:27:52,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,645 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,646 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,654 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:52,660 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:27:53,004 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d9c6eea224842f4b5683b9dccddff4f 2024-11-21T00:27:53,008 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/9fd787b1601147e396cac60ce47c1182 as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9fd787b1601147e396cac60ce47c1182 2024-11-21T00:27:53,012 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/9fd787b1601147e396cac60ce47c1182, entries=8, sequenceid=105, filesize=5.5 K 2024-11-21T00:27:53,012 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8690ffe5bb544c2a8d22b87fe35db65e as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8690ffe5bb544c2a8d22b87fe35db65e 2024-11-21T00:27:53,016 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8690ffe5bb544c2a8d22b87fe35db65e 2024-11-21T00:27:53,016 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8690ffe5bb544c2a8d22b87fe35db65e, entries=11, sequenceid=105, filesize=7.0 K 2024-11-21T00:27:53,016 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6d9c6eea224842f4b5683b9dccddff4f as hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6d9c6eea224842f4b5683b9dccddff4f 2024-11-21T00:27:53,020 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37293/user/jenkins/test-data/f6141051-c7df-4467-dc2b-b90a1307854b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6d9c6eea224842f4b5683b9dccddff4f, entries=1, sequenceid=105, filesize=5.0 K 2024-11-21T00:27:53,021 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.75 KB/57092, heapSize ~65.45 KB/67016, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1080ms, sequenceid=105, compaction requested=false 2024-11-21T00:27:53,039 INFO [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:53,039 DEBUG [M:0;5ed4808ef0e6:42819 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148871941Disabling compacts and flushes for region at 1732148871941Disabling writes for close at 1732148871941Obtaining lock to block concurrent updates at 1732148871941Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148871941Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=57092, getHeapSize=67256, getOffHeapSize=0, getCellsCount=122 at 1732148871941Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148871942 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148871942Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148871959 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148871959Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148871989 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148872012 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148872012Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148872585 (+573 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148872600 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148872600Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37723927: reopening flushed file at 1732148873008 (+408 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34b73ac9: reopening flushed file at 1732148873012 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@29e87da2: reopening flushed file at 1732148873016 (+4 ms)Finished flush of dataSize ~55.75 KB/57092, heapSize ~65.45 KB/67016, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1080ms, sequenceid=105, compaction requested=false at 1732148873021 (+5 ms)Writing region close event to WAL at 1732148873039 (+18 ms)Closed at 1732148873039 2024-11-21T00:27:53,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33273 is added to blk_1073741830_1006 (size=64423) 2024-11-21T00:27:53,043 INFO [M:0;5ed4808ef0e6:42819 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:27:53,043 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
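Interleaved with the flush records above are dozens of identical warnings from impl.FsDatasetImpl(779): the HBase-Metrics2-1 thread keeps sampling datanode volume metrics while the executor map has already been torn down, so each sample trips over a null field ("Cannot invoke java.util.Map.values() because this.executors is null"). A minimal null-guard sketch of that kind of read, in plain Java; the class and field names below are hypothetical and are not Hadoop's actual code:

// Illustrative only: a null-guarded metrics read mirroring the race behind the
// repeated FsDatasetImpl warnings above. Names are hypothetical, not Hadoop's code.
import java.util.Map;
import java.util.concurrent.ExecutorService;

class VolumeMetricsSketch {
  // In the datanode the executor map is torn down during shutdown while the
  // HBase-Metrics2-1 sampling thread may still be running, hence the NPE.
  private volatile Map<String, ExecutorService> executors;

  long activeExecutorCount() {
    Map<String, ExecutorService> snapshot = executors;  // read the field once
    if (snapshot == null) {
      return 0;  // shutdown in progress: report nothing instead of throwing
    }
    return snapshot.values().stream()
        .filter(e -> !e.isShutdown())
        .count();
  }
}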
2024-11-21T00:27:53,043 INFO [M:0;5ed4808ef0e6:42819 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42819 2024-11-21T00:27:53,044 INFO [M:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:27:53,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:53,195 INFO [M:0;5ed4808ef0e6:42819 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:27:53,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42819-0x1015ac95b3a0000, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:27:53,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@396e52d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:27:53,198 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a7e3235{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:27:53,198 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:27:53,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f29cd30{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:27:53,198 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68ca5029{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.log.dir/,STOPPED} 2024-11-21T00:27:53,200 WARN [BP-1035257430-172.17.0.2-1732148813734 heartbeating to localhost/127.0.0.1:37293 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:27:53,200 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
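The records above trace the orderly teardown of the mini cluster: the master's NettyRpcServer stops, its ZooKeeper watcher and table descriptors are closed, the Jetty datanode/hdfs web contexts are stopped, and the datanode's heartbeat and command-processor threads are interrupted. A hedged sketch of the JUnit teardown that typically drives this sequence, assuming the HBaseTestingUtil API named elsewhere in this log (exact constructor and method signatures may differ):

// Hedged sketch of the teardown behind the shutdown records above; assumes the
// HBaseTestingUtil API seen in this log, not a verified signature.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class ReplicationTestTeardownSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Stops the master and region servers, closes ZooKeeper watchers, then brings
    // down the MiniDFSCluster and MiniZooKeeperCluster.
    TEST_UTIL.shutdownMiniCluster();
  }
}

A single shutdownMiniCluster() call is expected to produce the whole cascade down to the "Minicluster is down" record further below.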
2024-11-21T00:27:53,200 WARN [BP-1035257430-172.17.0.2-1732148813734 heartbeating to localhost/127.0.0.1:37293 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1035257430-172.17.0.2-1732148813734 (Datanode Uuid 9f5180f9-d8d7-46f9-8cc7-2da3c71c59fa) service to localhost/127.0.0.1:37293 2024-11-21T00:27:53,200 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:27:53,200 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6/data/data1/current/BP-1035257430-172.17.0.2-1732148813734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:27:53,200 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/cluster_d0b2d3e3-7673-663c-4a1c-a05de2c688a6/data/data2/current/BP-1035257430-172.17.0.2-1732148813734 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:27:53,201 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:27:53,208 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@754f6ac0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:27:53,209 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@661cc5d6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:27:53,209 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:27:53,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64fb5b2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:27:53,209 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b7d156c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dd3b99c1-371c-b957-0067-23bd980ba8d8/hadoop.log.dir/,STOPPED} 2024-11-21T00:27:53,222 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:27:53,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:27:53,247 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testCyclicReplication2 Thread=550 (was 495) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45089 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:50082) java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: nioEventLoopGroup-26-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37293 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46065 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37293 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-35-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-35-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-36-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:45089 from jenkins.hfs.16 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46065 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.14@localhost:37293 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-32-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:50082) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-34-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-36-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37293 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-27-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46065 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-27-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-32-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:45089 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-31-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46065 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-33-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.16@localhost:45089 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-34-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37293 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-32-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-31-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46065 from jenkins.hfs.15 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-36-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:37293 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-26-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:37293 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:50082) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) 
app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: nioEventLoopGroup-27-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:45089 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-34-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37293 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-33-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-33-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46065 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46065 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:37293 from jenkins.hfs.14 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.15@localhost:46065 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45089 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-31-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-35-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=880 (was 805) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=991 (was 825) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=505 (was 1350) 2024-11-21T00:27:53,249 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=550 is superior to 500 2024-11-21T00:27:53,261 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testCyclicReplication3 Thread=550, OpenFileDescriptor=880, MaxFileDescriptor=1048576, SystemLoadAverage=991, ProcessCount=11, AvailableMemoryMB=505 2024-11-21T00:27:53,261 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=550 is superior to 500 2024-11-21T00:27:53,275 INFO [Time-limited test {}] replication.TestMasterReplication(410): testCyclicReplication2 2024-11-21T00:27:53,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.log.dir so I do NOT create it in target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11 2024-11-21T00:27:53,276 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.log.dir Erasing configuration value by system value. 
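The ResourceChecker summary above brackets testCyclicReplication2 with before/after counts (threads 550 vs. 495, open file descriptors 880 vs. 805, load average, process count, available memory) and flags a possible thread leak because the count exceeds the checker's limit of 500, which is why the "Thread=550 is superior to 500" warning also greets the next test. A simple illustration of that before/after accounting, offered as a sketch rather than the actual HBase ResourceChecker:

// Illustrative before/after accounting in the spirit of the ResourceChecker
// records above; not the actual HBase ResourceChecker implementation.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class SimpleResourceCheckSketch {
  private static final int THREAD_LIMIT = 500;  // the limit the log reports as exceeded

  public static void main(String[] args) {
    ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    int before = threads.getThreadCount();
    // ... the test body would run here ...
    int after = threads.getThreadCount();
    System.out.printf("Thread=%d (was %d)%n", after, before);
    if (after > THREAD_LIMIT) {
      System.out.printf("WARN Thread=%d is superior to %d%n", after, THREAD_LIMIT);
    }
  }
}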
2024-11-21T00:27:53,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.tmp.dir so I do NOT create it in target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11 2024-11-21T00:27:53,276 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d4a177bc-3d77-e5f1-7102-13c63d8c6167/hadoop.tmp.dir Erasing configuration value by system value. 2024-11-21T00:27:53,276 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11 2024-11-21T00:27:53,276 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f, deleteOnExit=true 2024-11-21T00:27:53,279 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/zookeeper_0, clientPort=60103, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:27:53,279 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60103 2024-11-21T00:27:53,279 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/test.cache.data in system properties and HBase conf 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:27:53,280 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:53,280 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:27:53,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:27:53,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015ac95b3a0005, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:27:53,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015ac95b3a0005, quorum=127.0.0.1:50082, baseZNode=/1-2137408572 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:27:53,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015ac95b3a0002, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:27:53,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster2-0x1015ac95b3a0008, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:27:53,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015ac95b3a0002, quorum=127.0.0.1:50082, baseZNode=/01148209107 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:27:53,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster2-0x1015ac95b3a0008, quorum=127.0.0.1:50082, baseZNode=/2-559595819 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:27:53,951 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to 
initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:53,955 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:27:53,964 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:27:53,964 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:27:53,965 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:27:53,969 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:53,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bbc206{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:27:53,973 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1afd8979{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:27:54,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@28a54498{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/java.io.tmpdir/jetty-localhost-37859-hadoop-hdfs-3_4_1-tests_jar-_-any-1160319147211353955/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:27:54,088 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@f9fe0bd{HTTP/1.1, (http/1.1)}{localhost:37859} 2024-11-21T00:27:54,089 INFO [Time-limited test {}] server.Server(415): Started @608638ms 2024-11-21T00:27:54,487 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:54,489 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:27:54,503 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:27:54,503 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:27:54,503 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:27:54,504 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a453c14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:27:54,505 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13cefcc8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:27:54,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2f7bb3d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/java.io.tmpdir/jetty-localhost-40943-hadoop-hdfs-3_4_1-tests_jar-_-any-1705102245078697113/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:27:54,632 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@24595238{HTTP/1.1, (http/1.1)}{localhost:40943} 2024-11-21T00:27:54,632 INFO [Time-limited test {}] server.Server(415): Started @609181ms 2024-11-21T00:27:54,634 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:27:55,631 WARN [Thread-3491 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/data/data1/current/BP-177379055-172.17.0.2-1732148873310/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:55,631 WARN [Thread-3492 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/data/data2/current/BP-177379055-172.17.0.2-1732148873310/current, will proceed with Du for space computation calculation, 2024-11-21T00:27:55,652 WARN [Thread-3479 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T00:27:55,658 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x588d3ecb56163624 with lease ID 0x3734313e92edd09e: Processing first storage report for DS-8b268792-34c9-45e7-a4c2-f58b45d879c0 from datanode DatanodeRegistration(127.0.0.1:42735, datanodeUuid=6a723d48-d1fb-4285-a935-9132bf25010a, infoPort=45503, infoSecurePort=0, ipcPort=38799, storageInfo=lv=-57;cid=testClusterID;nsid=1353495949;c=1732148873310) 2024-11-21T00:27:55,658 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x588d3ecb56163624 with lease ID 0x3734313e92edd09e: from storage DS-8b268792-34c9-45e7-a4c2-f58b45d879c0 node DatanodeRegistration(127.0.0.1:42735, datanodeUuid=6a723d48-d1fb-4285-a935-9132bf25010a, infoPort=45503, infoSecurePort=0, ipcPort=38799, storageInfo=lv=-57;cid=testClusterID;nsid=1353495949;c=1732148873310), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-21T00:27:55,658 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x588d3ecb56163624 with lease ID 0x3734313e92edd09e: Processing first storage report for DS-06c44b41-a6d5-4b59-ae9e-7600de5a6609 from datanode DatanodeRegistration(127.0.0.1:42735, datanodeUuid=6a723d48-d1fb-4285-a935-9132bf25010a, infoPort=45503, infoSecurePort=0, ipcPort=38799, storageInfo=lv=-57;cid=testClusterID;nsid=1353495949;c=1732148873310) 2024-11-21T00:27:55,658 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x588d3ecb56163624 with lease ID 0x3734313e92edd09e: from storage DS-06c44b41-a6d5-4b59-ae9e-7600de5a6609 node DatanodeRegistration(127.0.0.1:42735, datanodeUuid=6a723d48-d1fb-4285-a935-9132bf25010a, infoPort=45503, infoSecurePort=0, ipcPort=38799, storageInfo=lv=-57;cid=testClusterID;nsid=1353495949;c=1732148873310), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:27:55,680 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11 2024-11-21T00:27:55,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:55,682 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:55,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:27:56,089 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5 with version=8 2024-11-21T00:27:56,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/hbase-staging 2024-11-21T00:27:56,091 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:56,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:56,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:56,091 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:56,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:56,091 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:56,091 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:27:56,091 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:56,092 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42535 2024-11-21T00:27:56,093 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42535 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:27:56,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:425350x0, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:56,224 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42535-0x1015aca43ea0000 connected 2024-11-21T00:27:56,315 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:56,317 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:56,322 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on znode that does not yet exist, /01310799061/running 2024-11-21T00:27:56,322 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5, hbase.cluster.distributed=false 2024-11-21T00:27:56,324 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on znode that does not yet exist, /01310799061/acl 2024-11-21T00:27:56,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42535 2024-11-21T00:27:56,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42535 2024-11-21T00:27:56,372 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=42535 2024-11-21T00:27:56,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42535 2024-11-21T00:27:56,405 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42535 2024-11-21T00:27:56,424 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:27:56,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:56,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:56,424 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:27:56,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:27:56,424 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:27:56,424 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:27:56,425 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:27:56,425 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45749 2024-11-21T00:27:56,426 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45749 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:27:56,427 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:56,429 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:56,441 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457490x0, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:56,441 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:457490x0, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on znode that does not yet exist, /01310799061/running 2024-11-21T00:27:56,442 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45749-0x1015aca43ea0001 connected 2024-11-21T00:27:56,442 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:27:56,445 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, 
evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:27:56,446 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on znode that does not yet exist, /01310799061/master 2024-11-21T00:27:56,447 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on znode that does not yet exist, /01310799061/acl 2024-11-21T00:27:56,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45749 2024-11-21T00:27:56,468 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45749 2024-11-21T00:27:56,469 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45749 2024-11-21T00:27:56,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45749 2024-11-21T00:27:56,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45749 2024-11-21T00:27:56,504 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:42535 2024-11-21T00:27:56,508 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /01310799061/backup-masters/5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:56,515 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061/backup-masters 2024-11-21T00:27:56,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061/backup-masters 2024-11-21T00:27:56,517 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on existing znode=/01310799061/backup-masters/5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:56,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:56,526 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01310799061/master 2024-11-21T00:27:56,526 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:56,526 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on existing znode=/01310799061/master 2024-11-21T00:27:56,529 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /01310799061/backup-masters/5ed4808ef0e6,42535,1732148876091 from backup master directory 2024-11-21T00:27:56,536 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061/backup-masters 2024-11-21T00:27:56,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01310799061/backup-masters/5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:56,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061/backup-masters 2024-11-21T00:27:56,536 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:27:56,536 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:56,540 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/hbase.id] with ID: f531fde5-6173-43d1-970f-150eb244b957 2024-11-21T00:27:56,540 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/.tmp/hbase.id 2024-11-21T00:27:56,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:27:56,741 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:27:56,958 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/.tmp/hbase.id]:[hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/hbase.id] 2024-11-21T00:27:56,968 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:27:56,968 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:27:56,969 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
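Everything from the Jetty servlet contexts and datanode block reports above down to the master claiming the active role and writing the cluster ID file is the normal startup chatter of a single-node test cluster driven by HBaseTestingUtil (the class logging "Setting hbase.rootdir" earlier). A minimal test-side sketch that produces this kind of output is shown below; the table name and column family are illustrative, and the createTable overload mirrors the older HBaseTestingUtility API, so verify it against the 3.x test jar actually in use.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Owns the embedded HDFS, ZooKeeper, HMaster and HRegionServer whose startup
    // is logged above (hbase.rootdir, RPC bind ports, backup-masters znode, ...).
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();
    try (Table table = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("cf"))) {
      // A single write, just to prove the cluster is serving requests.
      table.put(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    }
    util.shutdownMiniCluster();
  }
}

startMiniCluster() is the call behind the embedded NameNode, DataNode, ZooKeeper ensemble, HMaster and HRegionServer visible in these lines; shutdownMiniCluster() stops them again at the end of the test.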
2024-11-21T00:27:57,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,020 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:27:57,030 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:27:57,031 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:27:57,032 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:57,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:27:57,054 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store 2024-11-21T00:27:57,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:27:57,266 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=31, created chunk count=42, reused chunk count=88, reuseRatio=67.69% 2024-11-21T00:27:57,267 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-21T00:27:57,461 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:57,461 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:27:57,461 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:57,461 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:57,461 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:27:57,461 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:27:57,461 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
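The long {TABLE_ATTRIBUTES => ...} blocks above are simply the string form of a TableDescriptor with its ColumnFamilyDescriptors. For reference, a descriptor with the same kind of per-family settings (ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory 'info' family with 8 KB blocks, single-version 'proc' family) can be assembled with the public builder API roughly as follows; the table name "example" is an assumption for illustration, since 'master:store' itself is created internally by MasterRegion rather than through client code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  static TableDescriptor exampleDescriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        // Mirrors the 'info' family above: 3 versions, in-memory, 8 KB blocks,
        // ROW_INDEX_V1 block encoding and a ROWCOL bloom filter.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        // Mirrors the plainer 'proc'/'rs'/'state' families: 1 version, ROW bloom, defaults otherwise.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}

Handing such a descriptor to Admin.createTable(...) produces the same style of descriptor dump in the master log for ordinary user tables.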
2024-11-21T00:27:57,461 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148877461Disabling compacts and flushes for region at 1732148877461Disabling writes for close at 1732148877461Writing region close event to WAL at 1732148877461Closed at 1732148877461 2024-11-21T00:27:57,462 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/.initializing 2024-11-21T00:27:57,462 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/WALs/5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:57,462 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:57,464 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C42535%2C1732148876091, suffix=, logDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/WALs/5ed4808ef0e6,42535,1732148876091, archiveDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/oldWALs, maxLogs=10 2024-11-21T00:27:57,483 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/WALs/5ed4808ef0e6,42535,1732148876091/5ed4808ef0e6%2C42535%2C1732148876091.1732148877464, exclude list is [], retry=0 2024-11-21T00:27:57,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42735,DS-8b268792-34c9-45e7-a4c2-f58b45d879c0,DISK] 2024-11-21T00:27:57,488 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/WALs/5ed4808ef0e6,42535,1732148876091/5ed4808ef0e6%2C42535%2C1732148876091.1732148877464 2024-11-21T00:27:57,490 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45503:45503)] 2024-11-21T00:27:57,490 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:57,490 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:57,491 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,491 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,492 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:27:57,494 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:57,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:27:57,495 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:57,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,497 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:27:57,497 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:57,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:27:57,501 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,501 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:27:57,501 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,502 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,502 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,504 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,504 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,508 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:57,509 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:27:57,511 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:57,511 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72571844, jitterRate=0.08140474557876587}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:57,512 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148877491Initializing all the Stores at 1732148877491Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148877491Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148877492 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148877492Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148877492Cleaning up temporary data from old regions at 1732148877504 (+12 ms)Region opened successfully at 1732148877512 (+8 ms) 2024-11-21T00:27:57,512 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:27:57,514 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6763115c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:57,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:27:57,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:27:57,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:27:57,515 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:27:57,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:27:57,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:27:57,516 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:27:57,518 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
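A few lines back the master's local-store WAL reported its provider and sizing: AsyncFSWALProvider with blocksize=256 MB, rollsize=128 MB (a 0.5 roll multiplier) and maxLogs=10. The sketch below shows the configuration keys commonly used to steer those numbers for region server WALs; the property names are the usual hbase-default.xml ones rather than something this log proves, and whether the master's internal store WAL honours every one of them is not visible here, so treat them as assumptions to verify.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalTuningSketch {
  static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    // Select the async WAL implementation named in the log (AsyncFSWALProvider).
    conf.set("hbase.wal.provider", "asyncfs");
    // blocksize=256 MB in the log; rollsize=128 MB is blocksize * multiplier (0.5).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // maxLogs=10 in the log: bound on un-archived WAL files before flushes are forced.
    conf.setInt("hbase.regionserver.maxlogs", 10);
    return conf;
  }
}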
2024-11-21T00:27:57,519 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Unable to get data of znode /01310799061/balancer because node does not exist (not necessarily an error) 2024-11-21T00:27:57,567 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01310799061/balancer already deleted, retry=false 2024-11-21T00:27:57,568 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:27:57,568 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Unable to get data of znode /01310799061/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:27:57,580 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01310799061/normalizer already deleted, retry=false 2024-11-21T00:27:57,580 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:27:57,582 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Unable to get data of znode /01310799061/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:27:57,588 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01310799061/switch/split already deleted, retry=false 2024-11-21T00:27:57,589 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Unable to get data of znode /01310799061/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:27:57,599 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01310799061/switch/merge already deleted, retry=false 2024-11-21T00:27:57,601 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Unable to get data of znode /01310799061/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:27:57,609 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01310799061/snapshot-cleanup already deleted, retry=false 2024-11-21T00:27:57,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01310799061/running 2024-11-21T00:27:57,620 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01310799061/running 2024-11-21T00:27:57,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,620 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,620 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,42535,1732148876091, sessionid=0x1015aca43ea0000, setting cluster-up flag (Was=false) 2024-11-21T00:27:57,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,641 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,683 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01310799061/flush-table-proc/acquired, /01310799061/flush-table-proc/reached, /01310799061/flush-table-proc/abort 2024-11-21T00:27:57,685 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:57,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,704 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:57,737 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01310799061/online-snapshot/acquired, /01310799061/online-snapshot/reached, /01310799061/online-snapshot/abort 2024-11-21T00:27:57,744 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:57,745 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:27:57,750 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:57,751 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:27:57,751 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , 
sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:27:57,751 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,42535,1732148876091 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:57,753 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,771 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:57,771 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:27:57,780 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,780 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148907780 2024-11-21T00:27:57,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:27:57,781 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:27:57,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:27:57,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:27:57,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:27:57,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:27:57,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:27:57,781 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
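The StochasticLoadBalancer line above lists its knobs (maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000) together with its cost functions. Those values correspond to the stochastic balancer's standard configuration keys; the key names below are the commonly documented hbase.master.balancer.stochastic.* ones and are an assumption to check against the running version, not something the log itself states.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  static Configuration balancerTuning() {
    Configuration conf = HBaseConfiguration.create();
    // These mirror the values StochasticLoadBalancer(272) printed above.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    return conf;
  }
}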
2024-11-21T00:27:57,782 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:27:57,782 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:27:57,782 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:27:57,782 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:27:57,788 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:27:57,788 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:27:57,788 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148877788,5,FailOnTimeoutGroup] 2024-11-21T00:27:57,792 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148877788,5,FailOnTimeoutGroup] 2024-11-21T00:27:57,792 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,792 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:27:57,792 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,792 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
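LogsCleaner, HFileCleaner, ReplicationBarrierCleaner and SnapshotCleaner above are all ScheduledChore instances registered with the master's ChoreService, each with the period printed next to it. The sketch below shows that mechanism in isolation; the chore name and one-second period are made up for the demo, and the exact constructor overloads of ScheduledChore and ChoreService vary a little between HBase versions, so check them before relying on this.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  // Minimal Stoppable so the chore has a shutdown flag to consult.
  static final class SimpleStopper implements Stoppable {
    private volatile boolean stopped;
    @Override public void stop(String why) { stopped = true; }
    @Override public boolean isStopped() { return stopped; }
  }

  public static void main(String[] args) throws Exception {
    ChoreService service = new ChoreService("demo");
    // The cleaners above run with period=600000 ms; one second keeps the demo short.
    ScheduledChore tick = new ScheduledChore("demo-chore", new SimpleStopper(), 1000) {
      @Override
      protected void chore() {
        System.out.println("chore tick"); // the real cleaners delete expired WALs/HFiles here
      }
    };
    service.scheduleChore(tick);
    TimeUnit.SECONDS.sleep(5);
    service.shutdown();
  }
}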
2024-11-21T00:27:57,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:27:57,797 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(746): ClusterId : f531fde5-6173-43d1-970f-150eb244b957 2024-11-21T00:27:57,797 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:27:57,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:27:57,797 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5 2024-11-21T00:27:57,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:27:57,810 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:27:57,810 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:27:57,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:57,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:57,817 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:57,817 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:57,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:57,818 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:57,818 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:57,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:57,819 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:57,819 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:57,819 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:27:57,820 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:57,820 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:57,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:57,821 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:27:57,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:57,821 DEBUG [RS:0;5ed4808ef0e6:45749 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1208dcda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:27:57,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740 2024-11-21T00:27:57,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740 2024-11-21T00:27:57,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:57,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning 
up temporary data for 1588230740 2024-11-21T00:27:57,822 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:27:57,823 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:57,829 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:27:57,829 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74081178, jitterRate=0.10389557480812073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:57,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148877811Initializing all the Stores at 1732148877812 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148877812Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148877816 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148877816Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148877816Cleaning up temporary data from old regions at 1732148877822 (+6 ms)Region opened successfully at 1732148877829 (+7 ms) 2024-11-21T00:27:57,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:27:57,829 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:27:57,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:27:57,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:27:57,829 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:27:57,832 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-21T00:27:57,832 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148877829Disabling compacts and flushes for region at 1732148877829Disabling writes for close at 1732148877829Writing region close event to WAL at 1732148877832 (+3 ms)Closed at 1732148877832 2024-11-21T00:27:57,833 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:57,833 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:27:57,833 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:27:57,834 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:57,835 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:27:57,839 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:45749 2024-11-21T00:27:57,839 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:27:57,839 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:27:57,839 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(832): About to register with Master. 
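The descriptor dumped above for hbase:meta (families info, ns, rep_barrier and table, each with ROWCOL bloom filters, ROW_INDEX_V1 block encoding and in-memory block caching) is built internally by the master. Purely as a hedged illustration, a user table with comparable family attributes could be described through the public client API roughly as below; the table name "example_table" and the single "info" family are invented for this sketch and are not taken from the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the attributes logged for the 'info' family of hbase:meta.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setInMemory(true)                                     // IN_MEMORY => 'true'
        .setBlocksize(8192)                                    // BLOCKSIZE => 8 KB
        .setMaxVersions(3)                                     // VERSIONS => '3'
        .build();
    // 'example_table' is a hypothetical name used only for this sketch.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(info)
        .build();
  }
}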
2024-11-21T00:27:57,840 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,42535,1732148876091 with port=45749, startcode=1732148876424 2024-11-21T00:27:57,840 DEBUG [RS:0;5ed4808ef0e6:45749 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:27:57,841 INFO [HMaster-EventLoopGroup-37-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40149, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.17 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:27:57,841 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42535 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:57,841 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42535 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:57,842 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5 2024-11-21T00:27:57,842 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40339 2024-11-21T00:27:57,842 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:27:57,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061/rs 2024-11-21T00:27:57,852 DEBUG [RS:0;5ed4808ef0e6:45749 {}] zookeeper.ZKUtil(111): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on existing znode=/01310799061/rs/5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:57,852 WARN [RS:0;5ed4808ef0e6:45749 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:27:57,852 INFO [RS:0;5ed4808ef0e6:45749 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:57,852 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:57,852 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,45749,1732148876424] 2024-11-21T00:27:57,858 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:27:57,860 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:27:57,860 INFO [RS:0;5ed4808ef0e6:45749 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:27:57,860 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
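The MemStoreFlusher line above reports the global memstore limit (880 M) and its low-water mark (836 M), both derived from fractions of the region server heap. A minimal hedged sketch of the configuration keys that drive those numbers; the fractions shown are the usual defaults, not values read back from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static Configuration memstoreTuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the region server heap all memstores together may use (assumed default 0.4).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Low-water mark as a fraction of the global limit (assumed default 0.95; 880 M * 0.95 ~= 836 M).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Per-region memstore flush threshold (128 MB is the usual default).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    return conf;
  }
}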
2024-11-21T00:27:57,860 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:27:57,861 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:27:57,861 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,861 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,862 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:27:57,862 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:57,862 DEBUG [RS:0;5ed4808ef0e6:45749 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:27:57,864 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T00:27:57,864 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,864 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,864 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,864 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,864 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,45749,1732148876424-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:57,881 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:27:57,881 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,45749,1732148876424-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,881 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,881 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.Replication(171): 5ed4808ef0e6,45749,1732148876424 started 2024-11-21T00:27:57,899 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:57,899 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,45749,1732148876424, RpcServer on 5ed4808ef0e6/172.17.0.2:45749, sessionid=0x1015aca43ea0001 2024-11-21T00:27:57,899 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:27:57,899 DEBUG [RS:0;5ed4808ef0e6:45749 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:57,900 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,45749,1732148876424' 2024-11-21T00:27:57,900 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01310799061/flush-table-proc/abort' 2024-11-21T00:27:57,900 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01310799061/flush-table-proc/acquired' 2024-11-21T00:27:57,900 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:27:57,901 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:27:57,901 DEBUG [RS:0;5ed4808ef0e6:45749 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:57,901 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,45749,1732148876424' 2024-11-21T00:27:57,901 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01310799061/online-snapshot/abort' 
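Several ScheduledChore instances (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner, ...) are registered above with the region server's ChoreService. As a hedged sketch of that pattern only, a custom chore might look roughly like this; the name "ExampleChore" and the 30-second period are invented for the example and this is not code from the server itself.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ExampleChoreSketch {
  static class ExampleChore extends ScheduledChore {
    ExampleChore(Stoppable stopper) {
      // name, stopper, period in milliseconds (30 s is arbitrary for the sketch)
      super("ExampleChore", stopper, 30_000);
    }
    @Override
    protected void chore() {
      // periodic work would go here
    }
  }

  public static void schedule(ChoreService service, Stoppable stopper) {
    // The ChoreService runs the chore on its internal scheduled thread pool,
    // which is what produces the "Chore ScheduledChore name=... is enabled." lines above.
    service.scheduleChore(new ExampleChore(stopper));
  }
}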
2024-11-21T00:27:57,901 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01310799061/online-snapshot/acquired' 2024-11-21T00:27:57,901 DEBUG [RS:0;5ed4808ef0e6:45749 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:27:57,901 INFO [RS:0;5ed4808ef0e6:45749 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:27:57,901 INFO [RS:0;5ed4808ef0e6:45749 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:27:57,985 WARN [5ed4808ef0e6:42535 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-21T00:27:58,002 INFO [RS:0;5ed4808ef0e6:45749 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:27:58,003 INFO [RS:0;5ed4808ef0e6:45749 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45749%2C1732148876424, suffix=, logDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424, archiveDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/oldWALs, maxLogs=10 2024-11-21T00:27:58,030 DEBUG [RS:0;5ed4808ef0e6:45749 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003, exclude list is [], retry=0 2024-11-21T00:27:58,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42735,DS-8b268792-34c9-45e7-a4c2-f58b45d879c0,DISK] 2024-11-21T00:27:58,050 INFO [RS:0;5ed4808ef0e6:45749 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 2024-11-21T00:27:58,052 DEBUG [RS:0;5ed4808ef0e6:45749 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45503:45503)] 2024-11-21T00:27:58,235 DEBUG [5ed4808ef0e6:42535 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:27:58,236 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:58,237 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,45749,1732148876424, state=OPENING 2024-11-21T00:27:58,241 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:27:58,251 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:27:58,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 
2024-11-21T00:27:58,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01310799061/meta-region-server: CHANGED 2024-11-21T00:27:58,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01310799061/meta-region-server: CHANGED 2024-11-21T00:27:58,252 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:27:58,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45749,1732148876424}] 2024-11-21T00:27:58,417 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:27:58,422 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-38-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45263, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:27:58,433 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:27:58,433 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:27:58,433 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:27:58,435 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45749%2C1732148876424.meta, suffix=.meta, logDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424, archiveDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/oldWALs, maxLogs=10 2024-11-21T00:27:58,478 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.meta.1732148878435.meta, exclude list is [], retry=0 2024-11-21T00:27:58,481 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42735,DS-8b268792-34c9-45e7-a4c2-f58b45d879c0,DISK] 2024-11-21T00:27:58,518 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.meta.1732148878435.meta 2024-11-21T00:27:58,528 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45503:45503)] 
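Both WALs above (the region server's default WAL and the hbase:meta WAL) are created by the AsyncFSWALProvider with blocksize=20 KB, rollsize=10 KB and maxLogs=10, deliberately tiny values for the test. A hedged sketch of how those knobs are normally expressed through configuration; the keys are the standard WAL settings, and only the values echo this log rather than the test's actual setup code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walTuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "asyncfs");                      // AsyncFSWALProvider, as logged
    conf.setLong("hbase.regionserver.hlog.blocksize", 20L * 1024);  // 20 KB WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);   // roll at 10 KB = 0.5 * blocksize
    conf.setInt("hbase.regionserver.maxlogs", 10);                  // maxLogs=10
    return conf;
  }
}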
2024-11-21T00:27:58,528 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:27:58,529 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:27:58,529 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:27:58,529 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:27:58,529 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:27:58,529 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:27:58,529 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:27:58,529 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:27:58,529 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:27:58,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:27:58,552 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:27:58,552 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:58,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:58,555 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:27:58,556 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:27:58,556 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:58,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:58,557 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:27:58,558 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:27:58,558 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:58,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:58,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 
1588230740 2024-11-21T00:27:58,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:27:58,559 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:27:58,559 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:27:58,560 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:27:58,560 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740 2024-11-21T00:27:58,561 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740 2024-11-21T00:27:58,562 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:27:58,562 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:27:58,562 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
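The CompactionConfiguration dumps repeated above for each family of hbase:meta show the effective settings: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0 and a 128 MB minCompactSize. A hedged sketch of the stock hbase.hstore.compaction.* keys behind those numbers; the values simply mirror the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration compactionTuned() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files picked up by a minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratios used by the exploring compaction policy.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Files smaller than this are always compaction candidates (128 MB in the log).
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
    return conf;
  }
}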
2024-11-21T00:27:58,567 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:27:58,569 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68928846, jitterRate=0.027119845151901245}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:27:58,569 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:27:58,569 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148878530Writing region info on filesystem at 1732148878530Initializing all the Stores at 1732148878530Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148878530Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148878540 (+10 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148878540Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148878540Cleaning up temporary data from old regions at 1732148878562 (+22 ms)Running coprocessor post-open hooks at 1732148878569 (+7 ms)Region opened successfully at 1732148878569 2024-11-21T00:27:58,570 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148878417 2024-11-21T00:27:58,573 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:58,574 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,45749,1732148876424, state=OPEN 2024-11-21T00:27:58,575 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished 
post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:27:58,575 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:27:58,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01310799061/meta-region-server 2024-11-21T00:27:58,649 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:27:58,649 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01310799061/meta-region-server: CHANGED 2024-11-21T00:27:58,650 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01310799061/meta-region-server 2024-11-21T00:27:58,650 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01310799061/meta-region-server: CHANGED 2024-11-21T00:27:58,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:27:58,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45749,1732148876424 in 397 msec 2024-11-21T00:27:58,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:27:58,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 818 msec 2024-11-21T00:27:58,656 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:27:58,656 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:27:58,657 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:58,657 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45749,1732148876424, seqNum=-1] 2024-11-21T00:27:58,658 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:58,659 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-38-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51825, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:58,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 923 msec 2024-11-21T00:27:58,675 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148878675, completionTime=-1 2024-11-21T00:27:58,675 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:27:58,675 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148938678 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732148998678 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42535,1732148876091-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42535,1732148876091-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42535,1732148876091-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:42535, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:58,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:58,679 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:58,681 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.146sec 2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
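The registry lookups above (and the test client's own cluster-id and meta-location fetches just below) go through the public client connection path. As a hedged sketch, a client resolves the hbase:meta location roughly like this; the code is illustrative only and assumes a Configuration already pointing at the running cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void printMetaLocation(Configuration conf) throws Exception {
    // conf is assumed to carry the cluster's ZooKeeper quorum / connection registry settings.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Corresponds to the "The fetched meta region location is [region=hbase:meta,,1..." debug lines.
      System.out.println(locator.getAllRegionLocations());
    }
  }

  public static void main(String[] args) throws Exception {
    printMetaLocation(HBaseConfiguration.create());
  }
}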
2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42535,1732148876091-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:27:58,682 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42535,1732148876091-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:27:58,700 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:27:58,700 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:27:58,700 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42535,1732148876091-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:27:58,700 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a1f39e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:58,700 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42535,-1 for getting cluster id 2024-11-21T00:27:58,701 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:27:58,704 DEBUG [HMaster-EventLoopGroup-37-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f531fde5-6173-43d1-970f-150eb244b957' 2024-11-21T00:27:58,704 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:27:58,704 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f531fde5-6173-43d1-970f-150eb244b957" 2024-11-21T00:27:58,704 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63e199b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:58,704 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42535,-1] 2024-11-21T00:27:58,705 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:27:58,705 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:27:58,706 INFO [HMaster-EventLoopGroup-37-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36308, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:27:58,709 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f723491, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:27:58,709 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:27:58,710 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45749,1732148876424, seqNum=-1] 2024-11-21T00:27:58,710 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:27:58,712 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-38-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53448, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:27:58,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,42535,1732148876091 2024-11-21T00:27:58,715 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:27:58,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:27:58,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015aca43ea0002 connected 2024-11-21T00:27:58,751 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.log.dir so I do NOT create it in target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227 2024-11-21T00:27:58,752 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:27:58,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.tmp.dir so I do NOT create it in target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227 2024-11-21T00:27:58,752 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.tmp.dir Erasing configuration value by system value. 
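The lines above mark the first minicluster as up (activeMaster=5ed4808ef0e6,42535,...) and show HBaseTestingUtil preparing directories for a second one; the StartMiniClusterOption dump just below lists one master, one region server and one datanode. A hedged sketch of the typical test-side usage of that utility; the method names are the public HBaseTestingUtil API, but the surrounding class is invented for the example.

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Spins up ZooKeeper, a mini DFS, one master and one region server, as the log above records.
    util.startMiniCluster();
    try {
      // test code against util.getConnection() / util.getAdmin() would run here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}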
2024-11-21T00:27:58,752 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227 2024-11-21T00:27:58,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:27:58,752 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/cluster_fe5596fc-f95e-3580-43bb-38c5fede92d3, deleteOnExit=true 2024-11-21T00:27:58,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:27:58,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/test.cache.data in system properties and HBase conf 2024-11-21T00:27:58,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:27:58,753 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:58,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:27:58,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:27:59,145 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:59,151 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:27:59,231 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:27:59,231 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:27:59,231 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:27:59,232 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:27:59,242 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4420e372{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:27:59,242 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ccff5d7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:27:59,416 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@489d5c51{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/java.io.tmpdir/jetty-localhost-35911-hadoop-hdfs-3_4_1-tests_jar-_-any-9790980253468061873/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:27:59,427 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5026758a{HTTP/1.1, (http/1.1)}{localhost:35911} 2024-11-21T00:27:59,427 INFO [Time-limited test {}] server.Server(415): Started @613976ms 2024-11-21T00:28:00,209 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:00,220 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:00,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:00,260 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:00,260 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:28:00,278 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a2f4dd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:00,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31e40e83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:00,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@530a01df{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/java.io.tmpdir/jetty-localhost-34681-hadoop-hdfs-3_4_1-tests_jar-_-any-6587062879767956391/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:00,433 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2644a270{HTTP/1.1, (http/1.1)}{localhost:34681} 2024-11-21T00:28:00,433 INFO [Time-limited test {}] server.Server(415): Started @614982ms 2024-11-21T00:28:00,434 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:28:01,561 WARN [Thread-3612 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/cluster_fe5596fc-f95e-3580-43bb-38c5fede92d3/data/data1/current/BP-721868918-172.17.0.2-1732148878788/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:01,562 WARN [Thread-3613 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/cluster_fe5596fc-f95e-3580-43bb-38c5fede92d3/data/data2/current/BP-721868918-172.17.0.2-1732148878788/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:01,652 WARN [Thread-3600 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:28:01,662 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86e8fb409d956de with lease ID 0xe6e9d72063e09b59: Processing first storage report for DS-814fcd31-845d-4bd2-aac0-e6306a17174f from datanode DatanodeRegistration(127.0.0.1:33609, datanodeUuid=f6306f22-29c3-4b8a-9643-5bda58eb0e00, infoPort=37401, infoSecurePort=0, ipcPort=42689, storageInfo=lv=-57;cid=testClusterID;nsid=455739849;c=1732148878788) 2024-11-21T00:28:01,662 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86e8fb409d956de with lease ID 0xe6e9d72063e09b59: from storage DS-814fcd31-845d-4bd2-aac0-e6306a17174f node DatanodeRegistration(127.0.0.1:33609, datanodeUuid=f6306f22-29c3-4b8a-9643-5bda58eb0e00, infoPort=37401, infoSecurePort=0, ipcPort=42689, storageInfo=lv=-57;cid=testClusterID;nsid=455739849;c=1732148878788), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:01,662 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x86e8fb409d956de with lease ID 0xe6e9d72063e09b59: Processing first storage report for DS-4498d7c2-4be5-4b37-a9b8-d3d36e7aa9eb from datanode DatanodeRegistration(127.0.0.1:33609, datanodeUuid=f6306f22-29c3-4b8a-9643-5bda58eb0e00, infoPort=37401, infoSecurePort=0, ipcPort=42689, storageInfo=lv=-57;cid=testClusterID;nsid=455739849;c=1732148878788) 2024-11-21T00:28:01,662 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x86e8fb409d956de with lease ID 0xe6e9d72063e09b59: from storage DS-4498d7c2-4be5-4b37-a9b8-d3d36e7aa9eb node DatanodeRegistration(127.0.0.1:33609, datanodeUuid=f6306f22-29c3-4b8a-9643-5bda58eb0e00, infoPort=37401, infoSecurePort=0, ipcPort=42689, storageInfo=lv=-57;cid=testClusterID;nsid=455739849;c=1732148878788), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:01,681 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227 
2024-11-21T00:28:01,681 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:01,683 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:01,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:28:02,159 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a with version=8 2024-11-21T00:28:02,159 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/hbase-staging 2024-11-21T00:28:02,162 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:02,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:02,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:02,162 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:02,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:02,162 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:02,162 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:28:02,162 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:02,163 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38567 2024-11-21T00:28:02,164 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38567 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:28:02,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:385670x0, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:02,224 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38567-0x1015aca43ea0003 connected 2024-11-21T00:28:02,315 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-21T00:28:02,316 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:02,321 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on znode that does not yet exist, /1-528321262/running 2024-11-21T00:28:02,321 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a, hbase.cluster.distributed=false 2024-11-21T00:28:02,325 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on znode that does not yet exist, /1-528321262/acl 2024-11-21T00:28:02,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38567 2024-11-21T00:28:02,343 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38567 2024-11-21T00:28:02,344 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38567 2024-11-21T00:28:02,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38567 2024-11-21T00:28:02,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38567 2024-11-21T00:28:02,417 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:02,417 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:02,417 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:02,417 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:02,417 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:02,417 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:02,417 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:28:02,417 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:02,428 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43643 2024-11-21T00:28:02,430 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:43643 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:28:02,431 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:02,434 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:02,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:436430x0, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:02,465 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:436430x0, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on znode that does not yet exist, /1-528321262/running 2024-11-21T00:28:02,465 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:28:02,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43643-0x1015aca43ea0004 connected 2024-11-21T00:28:02,496 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:28:02,497 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on znode that does not yet exist, /1-528321262/master 2024-11-21T00:28:02,498 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on znode that does not yet exist, /1-528321262/acl 2024-11-21T00:28:02,528 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43643 2024-11-21T00:28:02,539 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43643 2024-11-21T00:28:02,570 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43643 2024-11-21T00:28:02,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43643 2024-11-21T00:28:02,595 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43643 2024-11-21T00:28:02,611 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:38567 2024-11-21T00:28:02,617 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1-528321262/backup-masters/5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:02,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262/backup-masters 2024-11-21T00:28:02,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/1-528321262/backup-masters 2024-11-21T00:28:02,622 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on existing znode=/1-528321262/backup-masters/5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:02,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:02,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-528321262/master 2024-11-21T00:28:02,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:02,631 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on existing znode=/1-528321262/master 2024-11-21T00:28:02,632 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1-528321262/backup-masters/5ed4808ef0e6,38567,1732148882162 from backup master directory 2024-11-21T00:28:02,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262/backup-masters 2024-11-21T00:28:02,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-528321262/backup-masters/5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:02,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262/backup-masters 2024-11-21T00:28:02,643 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:28:02,643 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:02,654 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/hbase.id] with ID: 8d153efa-dd7f-4816-8e94-3c313345b6d9 2024-11-21T00:28:02,654 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/.tmp/hbase.id 2024-11-21T00:28:02,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:28:02,674 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/.tmp/hbase.id]:[hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/hbase.id] 2024-11-21T00:28:02,688 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:02,688 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:28:02,689 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:28:02,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:02,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:02,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:28:02,748 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:02,749 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:28:02,749 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:02,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:28:02,780 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store 2024-11-21T00:28:02,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:28:02,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:02,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:02,814 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:28:02,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:02,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:02,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:02,814 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:02,814 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148882814Disabling compacts and flushes for region at 1732148882814Disabling writes for close at 1732148882814Writing region close event to WAL at 1732148882814Closed at 1732148882814 2024-11-21T00:28:02,816 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/.initializing 2024-11-21T00:28:02,816 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/WALs/5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:02,817 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:02,818 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C38567%2C1732148882162, suffix=, logDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/WALs/5ed4808ef0e6,38567,1732148882162, archiveDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/oldWALs, maxLogs=10 2024-11-21T00:28:02,848 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/WALs/5ed4808ef0e6,38567,1732148882162/5ed4808ef0e6%2C38567%2C1732148882162.1732148882818, exclude list is [], retry=0 2024-11-21T00:28:02,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33609,DS-814fcd31-845d-4bd2-aac0-e6306a17174f,DISK] 2024-11-21T00:28:02,880 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/WALs/5ed4808ef0e6,38567,1732148882162/5ed4808ef0e6%2C38567%2C1732148882162.1732148882818 2024-11-21T00:28:02,884 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37401:37401)] 2024-11-21T00:28:02,884 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:02,885 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:02,885 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,885 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:28:02,904 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:02,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:02,907 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,909 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 
columnFamilyName proc 2024-11-21T00:28:02,909 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:02,911 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:02,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:28:02,915 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:02,915 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:02,916 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,916 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:28:02,916 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:02,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:02,917 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,917 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,918 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,919 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,919 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,919 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:02,921 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:02,941 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:02,945 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70860529, jitterRate=0.05590416491031647}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:02,945 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148882885Initializing all the Stores at 1732148882885Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148882885Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '65536 B (64KB)'} at 1732148882886 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148882886Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148882886Cleaning up temporary data from old regions at 1732148882919 (+33 ms)Region opened successfully at 1732148882945 (+26 ms) 2024-11-21T00:28:02,968 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:28:02,983 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@111f623c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:02,983 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:28:02,984 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:28:02,984 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:28:02,984 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:28:02,985 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-21T00:28:02,986 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:28:02,986 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:28:02,995 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:28:02,997 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Unable to get data of znode /1-528321262/balancer because node does not exist (not necessarily an error) 2024-11-21T00:28:03,019 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-528321262/balancer already deleted, retry=false 2024-11-21T00:28:03,020 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:28:03,020 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Unable to get data of znode /1-528321262/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:28:03,107 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-528321262/normalizer already deleted, retry=false 2024-11-21T00:28:03,107 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:28:03,118 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Unable to get data of znode /1-528321262/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:28:03,128 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-528321262/switch/split already deleted, retry=false 2024-11-21T00:28:03,132 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Unable to get data of znode /1-528321262/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:28:03,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-528321262/switch/merge already deleted, retry=false 2024-11-21T00:28:03,172 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Unable to get data of znode /1-528321262/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:28:03,243 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1-528321262/snapshot-cleanup already deleted, retry=false 2024-11-21T00:28:03,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-528321262/running 2024-11-21T00:28:03,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1-528321262/running 2024-11-21T00:28:03,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:03,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:03,405 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,38567,1732148882162, sessionid=0x1015aca43ea0003, setting cluster-up flag (Was=false) 2024-11-21T00:28:03,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:03,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:03,462 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-528321262/flush-table-proc/acquired, /1-528321262/flush-table-proc/reached, /1-528321262/flush-table-proc/abort 2024-11-21T00:28:03,463 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:03,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:03,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:03,516 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1-528321262/online-snapshot/acquired, /1-528321262/online-snapshot/reached, /1-528321262/online-snapshot/abort 2024-11-21T00:28:03,517 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:03,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:28:03,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,533 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,534 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:03,534 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:28:03,534 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:28:03,535 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,38567,1732148882162 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:28:03,545 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:03,545 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:03,546 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:03,546 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:03,546 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:28:03,546 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:03,546 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:03,546 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:03,565 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:03,565 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:28:03,567 DEBUG [PEWorker-1 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:03,567 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:28:03,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,601 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,603 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148913615 2024-11-21T00:28:03,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:28:03,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:28:03,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:28:03,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:28:03,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:28:03,615 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:28:03,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:03,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:28:03,643 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:03,644 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:28:03,644 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:28:03,644 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:28:03,644 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:28:03,645 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:28:03,645 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a 2024-11-21T00:28:03,661 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(746): ClusterId : 8d153efa-dd7f-4816-8e94-3c313345b6d9 2024-11-21T00:28:03,661 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:28:03,673 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:28:03,673 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:28:03,675 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:28:03,675 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:28:03,689 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:28:03,690 DEBUG [RS:0;5ed4808ef0e6:43643 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@148c0680, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:03,704 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148883675,5,FailOnTimeoutGroup] 2024-11-21T00:28:03,728 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148883704,5,FailOnTimeoutGroup] 2024-11-21T00:28:03,728 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:03,728 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:28:03,728 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:03,728 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:03,744 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:43643 2024-11-21T00:28:03,744 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:28:03,744 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:28:03,744 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:28:03,749 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,38567,1732148882162 with port=43643, startcode=1732148882416 2024-11-21T00:28:03,750 DEBUG [RS:0;5ed4808ef0e6:43643 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:28:03,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:28:03,753 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:03,767 INFO [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38787, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.18 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:28:03,768 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38567 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:03,768 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38567 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:03,769 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a 2024-11-21T00:28:03,769 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41775 2024-11-21T00:28:03,769 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:28:03,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:03,779 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:03,779 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:03,792 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:03,793 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:03,794 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:03,794 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:03,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:03,794 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:03,795 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:03,795 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:03,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:03,797 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:03,799 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:03,799 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:03,799 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:03,799 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:03,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740 2024-11-21T00:28:03,800 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740 2024-11-21T00:28:03,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:03,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:03,802 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:28:03,802 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:03,819 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:03,819 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68761264, jitterRate=0.024622678756713867}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:03,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262/rs 2024-11-21T00:28:03,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148883753Initializing all the Stores at 1732148883754 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148883754Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148883777 (+23 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148883777Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148883777Cleaning up temporary data from old regions at 1732148883801 (+24 ms)Region opened successfully at 1732148883819 (+18 ms) 2024-11-21T00:28:03,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:03,820 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:03,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:03,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:03,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:03,820 DEBUG [RS:0;5ed4808ef0e6:43643 {}] zookeeper.ZKUtil(111): 
regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on existing znode=/1-528321262/rs/5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:03,820 WARN [RS:0;5ed4808ef0e6:43643 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:28:03,820 INFO [RS:0;5ed4808ef0e6:43643 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:03,820 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:03,832 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,43643,1732148882416] 2024-11-21T00:28:03,844 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:03,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148883819Disabling compacts and flushes for region at 1732148883819Disabling writes for close at 1732148883820 (+1 ms)Writing region close event to WAL at 1732148883844 (+24 ms)Closed at 1732148883844 2024-11-21T00:28:03,845 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:03,845 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:28:03,846 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:28:03,847 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:03,848 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:28:03,870 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:28:03,872 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:28:03,874 INFO [RS:0;5ed4808ef0e6:43643 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:28:03,874 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:03,876 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-21T00:28:03,879 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-21T00:28:03,879 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,879 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,879 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,879 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,879 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,879 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,879 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2
2024-11-21T00:28:03,879 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,880 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,880 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,880 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,880 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,880 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1
2024-11-21T00:28:03,880 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3
2024-11-21T00:28:03,880 DEBUG [RS:0;5ed4808ef0e6:43643 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3
2024-11-21T00:28:03,892 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,892 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,892 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,892 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,892 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,892 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,43643,1732148882416-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-21T00:28:03,927 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-21T00:28:03,928 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,43643,1732148882416-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,928 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,928 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.Replication(171): 5ed4808ef0e6,43643,1732148882416 started
2024-11-21T00:28:03,968 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-21T00:28:03,968 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,43643,1732148882416, RpcServer on 5ed4808ef0e6/172.17.0.2:43643, sessionid=0x1015aca43ea0004
2024-11-21T00:28:03,968 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-21T00:28:03,968 DEBUG [RS:0;5ed4808ef0e6:43643 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,43643,1732148882416
2024-11-21T00:28:03,968 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,43643,1732148882416'
2024-11-21T00:28:03,968 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-528321262/flush-table-proc/abort'
2024-11-21T00:28:03,969 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-528321262/flush-table-proc/acquired'
2024-11-21T00:28:03,970 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-21T00:28:03,970 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-21T00:28:03,970 DEBUG [RS:0;5ed4808ef0e6:43643 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,43643,1732148882416
2024-11-21T00:28:03,970 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,43643,1732148882416'
2024-11-21T00:28:03,970 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1-528321262/online-snapshot/abort'
2024-11-21T00:28:03,970 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1-528321262/online-snapshot/acquired' 2024-11-21T00:28:03,975 DEBUG [RS:0;5ed4808ef0e6:43643 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:28:03,975 INFO [RS:0;5ed4808ef0e6:43643 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:28:03,975 INFO [RS:0;5ed4808ef0e6:43643 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:28:03,998 WARN [5ed4808ef0e6:38567 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-21T00:28:04,075 INFO [RS:0;5ed4808ef0e6:43643 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:04,076 INFO [RS:0;5ed4808ef0e6:43643 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C43643%2C1732148882416, suffix=, logDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416, archiveDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/oldWALs, maxLogs=10 2024-11-21T00:28:04,097 DEBUG [RS:0;5ed4808ef0e6:43643 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, exclude list is [], retry=0 2024-11-21T00:28:04,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33609,DS-814fcd31-845d-4bd2-aac0-e6306a17174f,DISK] 2024-11-21T00:28:04,121 INFO [RS:0;5ed4808ef0e6:43643 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 2024-11-21T00:28:04,124 DEBUG [RS:0;5ed4808ef0e6:43643 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37401:37401)] 2024-11-21T00:28:04,144 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:04,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,244 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,244 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,245 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,246 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,250 DEBUG [5ed4808ef0e6:38567 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:28:04,252 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:04,253 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,43643,1732148882416, state=OPENING 2024-11-21T00:28:04,254 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,261 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:04,272 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:28:04,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:04,283 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:04,283 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-528321262/meta-region-server: CHANGED 2024-11-21T00:28:04,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,43643,1732148882416}] 2024-11-21T00:28:04,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:04,288 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-528321262/meta-region-server: CHANGED 2024-11-21T00:28:04,312 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:28:04,448 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:04,465 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38841, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:28:04,477 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:28:04,477 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:04,477 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:28:04,481 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C43643%2C1732148882416.meta, suffix=.meta, logDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416, archiveDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/oldWALs, maxLogs=10 2024-11-21T00:28:04,502 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.meta.1732148884482.meta, exclude list is [], retry=0 2024-11-21T00:28:04,504 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33609,DS-814fcd31-845d-4bd2-aac0-e6306a17174f,DISK] 2024-11-21T00:28:04,521 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.meta.1732148884482.meta 2024-11-21T00:28:04,532 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37401:37401)] 2024-11-21T00:28:04,532 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:04,533 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:04,533 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:04,533 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:28:04,533 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-21T00:28:04,533 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:28:04,533 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:04,533 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:28:04,533 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:28:04,548 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:04,551 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:04,551 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:04,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:04,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:04,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:04,562 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:04,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:04,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:04,564 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:04,564 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:04,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:04,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:04,571 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:04,571 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:04,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-21T00:28:04,572 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:04,572 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740 2024-11-21T00:28:04,573 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740 2024-11-21T00:28:04,574 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:04,574 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:04,575 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:04,578 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:04,581 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74801450, jitterRate=0.11462846398353577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:04,582 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:28:04,582 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148884533Writing region info on filesystem at 1732148884533Initializing all the Stores at 1732148884534 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148884534Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148884548 (+14 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148884548Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148884548Cleaning up temporary data from old regions at 1732148884574 (+26 ms)Running coprocessor post-open hooks at 1732148884582 (+8 ms)Region opened successfully at 1732148884582 2024-11-21T00:28:04,584 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148884448 2024-11-21T00:28:04,586 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:28:04,586 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:28:04,586 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:04,587 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,43643,1732148882416, state=OPEN 2024-11-21T00:28:04,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-528321262/meta-region-server 2024-11-21T00:28:04,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1-528321262/meta-region-server 2024-11-21T00:28:04,630 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-528321262/meta-region-server: CHANGED 2024-11-21T00:28:04,630 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:04,631 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1-528321262/meta-region-server: CHANGED 2024-11-21T00:28:04,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:28:04,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,43643,1732148882416 in 347 msec 2024-11-21T00:28:04,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:28:04,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 787 msec 2024-11-21T00:28:04,636 DEBUG [PEWorker-5 
{}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:04,636 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:28:04,637 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:04,637 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43643,1732148882416, seqNum=-1] 2024-11-21T00:28:04,637 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:04,638 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50469, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:04,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1120 sec 2024-11-21T00:28:04,642 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148884642, completionTime=-1 2024-11-21T00:28:04,642 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:28:04,643 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:28:04,644 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148944644 2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732149004645 2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38567,1732148882162-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38567,1732148882162-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38567,1732148882162-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:38567, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:04,645 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:04,647 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:28:04,648 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.009sec 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38567,1732148882162-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:04,652 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38567,1732148882162-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:28:04,667 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:28:04,667 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:28:04,667 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38567,1732148882162-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:04,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26c5dc9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:04,676 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:04,677 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:04,694 DEBUG [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:04,694 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:04,694 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:04,694 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c4d5c25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:04,694 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:04,695 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:04,695 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:04,696 INFO [HMaster-EventLoopGroup-39-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37528, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:04,697 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1375986a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:04,697 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:04,698 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43643,1732148882416, seqNum=-1] 2024-11-21T00:28:04,698 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:04,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55430, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:04,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:04,703 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:60103 
2024-11-21T00:28:04,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:04,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015aca43ea0005 connected 2024-11-21T00:28:04,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.log.dir so I do NOT create it in target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3 2024-11-21T00:28:04,780 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:28:04,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.tmp.dir so I do NOT create it in target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3 2024-11-21T00:28:04,780 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.tmp.dir Erasing configuration value by system value. 
2024-11-21T00:28:04,781 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/cluster_b0320c97-1961-290a-e797-e4612a89f93b, deleteOnExit=true 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/test.cache.data in system properties and HBase conf 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:28:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:28:04,781 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:28:04,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:28:04,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:04,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:28:04,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:28:05,215 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:05,223 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:05,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:05,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:05,283 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:28:05,284 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:05,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e8625ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:05,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@129ae8a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:05,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a121b35{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/java.io.tmpdir/jetty-localhost-46581-hadoop-hdfs-3_4_1-tests_jar-_-any-5115532515843837218/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:05,455 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@216f8310{HTTP/1.1, (http/1.1)}{localhost:46581} 2024-11-21T00:28:05,455 INFO [Time-limited test {}] server.Server(415): Started @620005ms 2024-11-21T00:28:05,844 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:28:06,147 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:06,150 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:06,184 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:06,184 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:06,185 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:28:06,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f64b84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:06,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bc71f1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:06,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25fb5ae3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/java.io.tmpdir/jetty-localhost-36181-hadoop-hdfs-3_4_1-tests_jar-_-any-11863648627603396980/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:06,333 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@517cb558{HTTP/1.1, (http/1.1)}{localhost:36181} 2024-11-21T00:28:06,333 INFO [Time-limited test {}] server.Server(415): Started @620883ms 2024-11-21T00:28:06,335 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:28:07,201 WARN [Thread-3734 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/cluster_b0320c97-1961-290a-e797-e4612a89f93b/data/data1/current/BP-1977379224-172.17.0.2-1732148884812/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:07,228 WARN [Thread-3735 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/cluster_b0320c97-1961-290a-e797-e4612a89f93b/data/data2/current/BP-1977379224-172.17.0.2-1732148884812/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:07,285 WARN [Thread-3722 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T00:28:07,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6cea6e447a5d562d with lease ID 0xac8ca25d3faf642b: Processing first storage report for DS-f7ce8dff-c4a9-4000-97b7-4c81619a63a4 from datanode DatanodeRegistration(127.0.0.1:44027, datanodeUuid=efdb4c1b-191b-4fd9-afe3-621191e442c8, infoPort=46429, infoSecurePort=0, ipcPort=42423, storageInfo=lv=-57;cid=testClusterID;nsid=186815660;c=1732148884812) 2024-11-21T00:28:07,290 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cea6e447a5d562d with lease ID 0xac8ca25d3faf642b: from storage DS-f7ce8dff-c4a9-4000-97b7-4c81619a63a4 node DatanodeRegistration(127.0.0.1:44027, datanodeUuid=efdb4c1b-191b-4fd9-afe3-621191e442c8, infoPort=46429, infoSecurePort=0, ipcPort=42423, storageInfo=lv=-57;cid=testClusterID;nsid=186815660;c=1732148884812), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:07,290 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6cea6e447a5d562d with lease ID 0xac8ca25d3faf642b: Processing first storage report for DS-e8545301-9a02-4cec-9128-87fb2c9aba14 from datanode DatanodeRegistration(127.0.0.1:44027, datanodeUuid=efdb4c1b-191b-4fd9-afe3-621191e442c8, infoPort=46429, infoSecurePort=0, ipcPort=42423, storageInfo=lv=-57;cid=testClusterID;nsid=186815660;c=1732148884812) 2024-11-21T00:28:07,291 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6cea6e447a5d562d with lease ID 0xac8ca25d3faf642b: from storage DS-e8545301-9a02-4cec-9128-87fb2c9aba14 node DatanodeRegistration(127.0.0.1:44027, datanodeUuid=efdb4c1b-191b-4fd9-afe3-621191e442c8, infoPort=46429, infoSecurePort=0, ipcPort=42423, storageInfo=lv=-57;cid=testClusterID;nsid=186815660;c=1732148884812), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:07,368 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3 2024-11-21T00:28:07,368 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:07,370 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:07,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:28:07,412 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d with version=8 2024-11-21T00:28:07,413 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/hbase-staging 2024-11-21T00:28:07,415 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:07,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:07,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:07,415 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:07,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:07,415 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:07,415 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:28:07,415 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:07,417 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35935 2024-11-21T00:28:07,418 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35935 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:28:07,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359350x0, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:07,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35935-0x1015aca43ea0006 connected 2024-11-21T00:28:07,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:07,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:07,521 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on znode that does not yet exist, /21019767428/running 2024-11-21T00:28:07,521 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d, hbase.cluster.distributed=false 2024-11-21T00:28:07,522 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on znode that does not yet exist, /21019767428/acl 2024-11-21T00:28:07,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35935 2024-11-21T00:28:07,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35935 2024-11-21T00:28:07,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=35935 2024-11-21T00:28:07,552 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35935 2024-11-21T00:28:07,558 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35935 2024-11-21T00:28:07,590 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:07,590 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:07,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:07,591 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:07,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:07,591 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:07,591 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:28:07,591 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:07,604 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45849 2024-11-21T00:28:07,605 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45849 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:28:07,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:07,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:07,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458490x0, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:07,632 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:458490x0, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on znode that does not yet exist, /21019767428/running 2024-11-21T00:28:07,632 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:28:07,636 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45849-0x1015aca43ea0007 connected 2024-11-21T00:28:07,676 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, 
evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:28:07,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on znode that does not yet exist, /21019767428/master 2024-11-21T00:28:07,678 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on znode that does not yet exist, /21019767428/acl 2024-11-21T00:28:07,708 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45849 2024-11-21T00:28:07,724 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45849 2024-11-21T00:28:07,728 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45849 2024-11-21T00:28:07,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45849 2024-11-21T00:28:07,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45849 2024-11-21T00:28:07,770 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:35935 2024-11-21T00:28:07,785 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /21019767428/backup-masters/5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:07,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428/backup-masters 2024-11-21T00:28:07,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428/backup-masters 2024-11-21T00:28:07,791 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on existing znode=/21019767428/backup-masters/5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:07,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/21019767428/master 2024-11-21T00:28:07,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:07,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:07,799 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on existing znode=/21019767428/master 2024-11-21T00:28:07,799 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /21019767428/backup-masters/5ed4808ef0e6,35935,1732148887414 from backup master directory 2024-11-21T00:28:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428/backup-masters 2024-11-21T00:28:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/21019767428/backup-masters/5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:07,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428/backup-masters 2024-11-21T00:28:07,809 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:28:07,809 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:07,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/hbase.id] with ID: 727cbd73-2263-4ee7-a05f-23c795f85fd6 2024-11-21T00:28:07,813 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/.tmp/hbase.id 2024-11-21T00:28:07,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:28:07,851 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/.tmp/hbase.id]:[hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/hbase.id] 2024-11-21T00:28:07,862 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:07,863 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:28:07,863 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 0ms. 
2024-11-21T00:28:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:07,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:07,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:28:07,909 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:07,910 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:28:07,910 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:07,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:28:07,968 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store 2024-11-21T00:28:07,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:28:07,990 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:07,990 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:07,990 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:07,990 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:07,990 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:07,990 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:07,990 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:28:07,990 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148887990Disabling compacts and flushes for region at 1732148887990Disabling writes for close at 1732148887990Writing region close event to WAL at 1732148887990Closed at 1732148887990 2024-11-21T00:28:07,991 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/.initializing 2024-11-21T00:28:07,991 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/WALs/5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:07,992 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:07,993 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C35935%2C1732148887414, suffix=, logDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/WALs/5ed4808ef0e6,35935,1732148887414, archiveDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/oldWALs, maxLogs=10 2024-11-21T00:28:08,011 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/WALs/5ed4808ef0e6,35935,1732148887414/5ed4808ef0e6%2C35935%2C1732148887414.1732148887994, exclude list is [], retry=0 2024-11-21T00:28:08,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7ce8dff-c4a9-4000-97b7-4c81619a63a4,DISK] 2024-11-21T00:28:08,043 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/WALs/5ed4808ef0e6,35935,1732148887414/5ed4808ef0e6%2C35935%2C1732148887414.1732148887994 2024-11-21T00:28:08,044 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46429:46429)] 2024-11-21T00:28:08,044 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:08,044 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:08,045 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,045 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,064 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,066 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:28:08,066 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:08,072 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:28:08,074 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:08,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:28:08,076 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:08,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:28:08,077 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,078 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:08,078 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,078 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,079 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,080 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,080 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,080 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:08,081 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:08,092 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:08,092 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63691204, jitterRate=-0.05092710256576538}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:08,093 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148888045Initializing all the Stores at 1732148888045Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148888045Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148888064 (+19 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148888064Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148888064Cleaning up temporary data from old regions at 1732148888080 (+16 ms)Region opened successfully at 1732148888093 (+13 ms) 2024-11-21T00:28:08,101 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:28:08,106 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48704a26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:08,110 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:28:08,110 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:28:08,110 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:28:08,110 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:28:08,111 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:28:08,111 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:28:08,111 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:28:08,138 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
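
The flush and procedure-store values echoed above come out of the Configuration the test harness builds. A minimal sketch, assuming only the standard classpath lookup done by HBaseConfiguration, of reading the per-column-family flush lower bound named in the FlushLargeStoresPolicy line; the class name and fallback value are illustrative, not part of this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushConfigProbe {
      public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();

        // Key named in the FlushLargeStoresPolicy log line; when it is unset,
        // HBase derives the bound from memstore flush size / number of families
        // (reported above as 32.0 M).
        long lowerBound = conf.getLong(
            "hbase.hregion.percolumnfamilyflush.size.lower.bound", -1L);

        System.out.println(lowerBound < 0
            ? "per-CF flush lower bound unset; derived at runtime"
            : "per-CF flush lower bound = " + lowerBound + " bytes");
      }
    }
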
2024-11-21T00:28:08,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Unable to get data of znode /21019767428/balancer because node does not exist (not necessarily an error) 2024-11-21T00:28:08,156 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /21019767428/balancer already deleted, retry=false 2024-11-21T00:28:08,157 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:28:08,157 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Unable to get data of znode /21019767428/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:28:08,166 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /21019767428/normalizer already deleted, retry=false 2024-11-21T00:28:08,167 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:28:08,175 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Unable to get data of znode /21019767428/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:28:08,251 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /21019767428/switch/split already deleted, retry=false 2024-11-21T00:28:08,252 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Unable to get data of znode /21019767428/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:28:08,261 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /21019767428/switch/merge already deleted, retry=false 2024-11-21T00:28:08,265 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Unable to get data of znode /21019767428/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:28:08,272 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /21019767428/snapshot-cleanup already deleted, retry=false 2024-11-21T00:28:08,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/21019767428/running 2024-11-21T00:28:08,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/21019767428/running 2024-11-21T00:28:08,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:08,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:08,283 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,35935,1732148887414, sessionid=0x1015aca43ea0006, setting cluster-up flag (Was=false) 2024-11-21T00:28:08,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:08,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:08,398 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /21019767428/flush-table-proc/acquired, /21019767428/flush-table-proc/reached, /21019767428/flush-table-proc/abort 2024-11-21T00:28:08,399 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:08,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:08,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:08,451 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /21019767428/online-snapshot/acquired, /21019767428/online-snapshot/reached, /21019767428/online-snapshot/abort 2024-11-21T00:28:08,452 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:08,453 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:28:08,457 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:08,457 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:28:08,457 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of 
multiplier of cost functions = 0.0 etc. 2024-11-21T00:28:08,458 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,35935,1732148887414 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:08,470 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,479 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(746): ClusterId : 727cbd73-2263-4ee7-a05f-23c795f85fd6 2024-11-21T00:28:08,479 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:28:08,483 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:28:08,483 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:28:08,494 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:28:08,494 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ec1510f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:08,501 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:08,502 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: 
creating hbase:meta region 2024-11-21T00:28:08,503 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,503 INFO [PEWorker-2 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:28:08,527 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:45849 2024-11-21T00:28:08,527 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:28:08,527 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:28:08,527 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(832): About to register with Master. 
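
The hbase:meta descriptor printed by FSTableDescriptors above is built internally, but the same column-family attributes (VERSIONS => 3, ROWCOL bloom filter, ROW_INDEX_V1 block encoding, 8 KB blocks, in-memory) can be expressed through the public builder API. A sketch for a hypothetical table of your own, not the code path InitMetaProcedure actually runs:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .setInMemory(true)
            .build();

        // Hypothetical table name; hbase:meta itself is never created this way.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("meta_like_demo"))
            .setColumnFamily(info)
            .build();

        System.out.println(td);
      }
    }
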
2024-11-21T00:28:08,528 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,35935,1732148887414 with port=45849, startcode=1732148887589 2024-11-21T00:28:08,528 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:28:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148918529 2024-11-21T00:28:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:28:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:28:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:28:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:28:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:28:08,529 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:28:08,530 INFO [HMaster-EventLoopGroup-41-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50309, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.19 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:28:08,531 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35935 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:28:08,531 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-21T00:28:08,531 WARN [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-21T00:28:08,532 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
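
The reportForDuty exchange above is a plain retry loop: the regionserver calls the master, gets ServerNotRunningYetException while master startup is still in progress, sleeps 100 ms, and tries again. An illustrative, stand-alone sketch of that pattern; the names and the simulated failure are assumptions, not HRegionServer code:

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Supplier;

    public class ReportForDutyRetry {
      // Retries a call while the remote side is "not running yet", the way the
      // regionserver above pauses 100 ms between reportForDuty attempts.
      static <T> T retry(Supplier<T> call, long sleepMillis, int maxAttempts)
          throws InterruptedException {
        RuntimeException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            return call.get();
          } catch (IllegalStateException notRunningYet) {
            last = notRunningYet;
            TimeUnit.MILLISECONDS.sleep(sleepMillis);
          }
        }
        throw last;
      }

      public static void main(String[] args) throws InterruptedException {
        AtomicInteger calls = new AtomicInteger();
        // Simulated master: rejects the first two registrations, then accepts.
        Supplier<String> reportForDuty = () -> {
          if (calls.incrementAndGet() < 3) {
            throw new IllegalStateException("Server is not running yet");
          }
          return "registered";
        };
        System.out.println(retry(reportForDuty, 100, 10));
      }
    }
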
2024-11-21T00:28:08,532 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:28:08,532 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:28:08,532 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:28:08,532 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:28:08,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:28:08,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:28:08,548 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148888540,5,FailOnTimeoutGroup] 2024-11-21T00:28:08,560 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148888548,5,FailOnTimeoutGroup] 2024-11-21T00:28:08,560 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,560 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:28:08,560 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,560 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
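
Each ScheduledChore entry above is a named task run at a fixed period by the master's ChoreService. A rough equivalent with a plain ScheduledExecutorService, reusing the 600000 ms HFileCleaner period from the log; the task body is a placeholder, not the real cleaner logic:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

        // Stand-in for the HFileCleaner chore: runs every 600000 ms (10 minutes).
        Runnable hfileCleaner =
            () -> System.out.println("scanning archive for expired HFiles");
        chores.scheduleAtFixedRate(hfileCleaner, 0, 600_000, TimeUnit.MILLISECONDS);

        // A real ChoreService is cancelled when the master stops; a long-running
        // sketch would eventually call chores.shutdown().
      }
    }
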
2024-11-21T00:28:08,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:28:08,598 INFO [PEWorker-2 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:28:08,598 INFO [PEWorker-2 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d 2024-11-21T00:28:08,632 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,35935,1732148887414 with port=45849, startcode=1732148887589 2024-11-21T00:28:08,633 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35935 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:08,633 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35935 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:08,635 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d 2024-11-21T00:28:08,635 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35655 2024-11-21T00:28:08,635 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:28:08,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:28:08,663 DEBUG [PEWorker-2 {}] regionserver.HRegion(898): Instantiated 
hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:08,680 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:08,691 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:08,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:08,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:08,694 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:08,694 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:08,695 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family 
rep_barrier of region 1588230740 2024-11-21T00:28:08,696 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:08,696 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:08,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:08,699 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:08,699 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:08,700 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:08,700 DEBUG [PEWorker-2 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:08,701 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740 2024-11-21T00:28:08,702 DEBUG [PEWorker-2 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740 2024-11-21T00:28:08,703 DEBUG [PEWorker-2 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:08,703 DEBUG [PEWorker-2 {}] 
regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:08,703 DEBUG [PEWorker-2 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:08,704 DEBUG [PEWorker-2 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:08,712 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:08,713 INFO [PEWorker-2 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63848300, jitterRate=-0.04858618974685669}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:08,713 DEBUG [PEWorker-2 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148888663Initializing all the Stores at 1732148888666 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148888666Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148888680 (+14 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148888680Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148888680Cleaning up temporary data from old regions at 1732148888703 (+23 ms)Region opened successfully at 1732148888713 (+10 ms) 2024-11-21T00:28:08,713 DEBUG [PEWorker-2 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:08,713 INFO [PEWorker-2 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:08,713 DEBUG [PEWorker-2 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:08,713 DEBUG [PEWorker-2 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:08,713 DEBUG [PEWorker-2 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:08,714 INFO [PEWorker-2 
{}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:08,714 DEBUG [PEWorker-2 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148888713Disabling compacts and flushes for region at 1732148888713Disabling writes for close at 1732148888713Writing region close event to WAL at 1732148888714 (+1 ms)Closed at 1732148888714 2024-11-21T00:28:08,715 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:08,715 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:28:08,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:28:08,716 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:08,717 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:28:08,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428/rs 2024-11-21T00:28:08,728 DEBUG [RS:0;5ed4808ef0e6:45849 {}] zookeeper.ZKUtil(111): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on existing znode=/21019767428/rs/5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:08,728 WARN [RS:0;5ed4808ef0e6:45849 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
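
The ZKUtil lines above ("Unable to get data of znode ... because node does not exist (not necessarily an error)") correspond to an ordinary ZooKeeper getData call whose NoNodeException is treated as a normal condition rather than a failure. A minimal sketch against the raw ZooKeeper client; the quorum address and base znode are copied from this test run and will not exist elsewhere:

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZnodeProbe {
      public static void main(String[] args) throws Exception {
        // Connection details taken from the log lines above; test-run specific.
        Watcher watcher = (WatchedEvent e) -> System.out.println("event: " + e);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60103", 30_000, watcher);
        try {
          // watch=true also registers a watcher, as ZKUtil.setWatcherIfNodeExists does.
          byte[] data = zk.getData("/21019767428/balancer", true, null);
          System.out.println("balancer znode bytes: " + data.length);
        } catch (KeeperException.NoNodeException missing) {
          // Same condition reported above: absence is not necessarily an error.
          System.out.println("znode does not exist yet");
        } finally {
          zk.close();
        }
      }
    }
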
2024-11-21T00:28:08,728 INFO [RS:0;5ed4808ef0e6:45849 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:08,728 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:08,733 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,45849,1732148887589] 2024-11-21T00:28:08,746 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:28:08,756 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:28:08,760 INFO [RS:0;5ed4808ef0e6:45849 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:28:08,760 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,764 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:28:08,765 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:28:08,765 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
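
The WALFactory line above reports AsyncFSWALProvider as the provider class. To my understanding the standard switch for that choice is the hbase.wal.provider setting (value "asyncfs"), normally placed in the server-side hbase-site.xml; treat the key/value pairing here as a hedged assumption about how such a provider is selected rather than a statement about this particular harness:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Server-side setting; "asyncfs" is assumed to map to the
        // AsyncFSWALProvider class named in the log.
        conf.set("hbase.wal.provider", "asyncfs");
        System.out.println("WAL provider = " + conf.get("hbase.wal.provider"));
      }
    }
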
2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:08,766 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:08,767 DEBUG [RS:0;5ed4808ef0e6:45849 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:08,778 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,778 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,778 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,778 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
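
The executor.ExecutorService entries above list one thread pool per operation type, each with a core and a maximum size. The shape maps onto a plain ThreadPoolExecutor; a generic sketch using the RS_OPEN_REGION sizes (1/1) purely as an illustration, with an assumed queue and keep-alive:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RegionOpenPool {
      public static void main(String[] args) {
        // corePoolSize=1, maxPoolSize=1 as reported for RS_OPEN_REGION above;
        // queue type and keep-alive are assumptions for the sketch.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());

        pool.execute(() -> System.out.println("open region task"));
        pool.shutdown();
      }
    }
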
2024-11-21T00:28:08,778 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,778 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,45849,1732148887589-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:08,800 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:28:08,801 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,45849,1732148887589-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,801 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,801 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.Replication(171): 5ed4808ef0e6,45849,1732148887589 started 2024-11-21T00:28:08,821 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:08,821 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,45849,1732148887589, RpcServer on 5ed4808ef0e6/172.17.0.2:45849, sessionid=0x1015aca43ea0007 2024-11-21T00:28:08,821 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:28:08,822 DEBUG [RS:0;5ed4808ef0e6:45849 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:08,822 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,45849,1732148887589' 2024-11-21T00:28:08,822 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/21019767428/flush-table-proc/abort' 2024-11-21T00:28:08,822 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/21019767428/flush-table-proc/acquired' 2024-11-21T00:28:08,823 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:28:08,823 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:28:08,823 DEBUG [RS:0;5ed4808ef0e6:45849 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:08,823 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,45849,1732148887589' 2024-11-21T00:28:08,823 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/21019767428/online-snapshot/abort' 2024-11-21T00:28:08,823 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/21019767428/online-snapshot/acquired' 2024-11-21T00:28:08,824 DEBUG [RS:0;5ed4808ef0e6:45849 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:28:08,824 INFO [RS:0;5ed4808ef0e6:45849 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:28:08,824 INFO [RS:0;5ed4808ef0e6:45849 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:28:08,867 WARN [5ed4808ef0e6:35935 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-21T00:28:08,924 INFO [RS:0;5ed4808ef0e6:45849 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:08,927 INFO [RS:0;5ed4808ef0e6:45849 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45849%2C1732148887589, suffix=, logDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589, archiveDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/oldWALs, maxLogs=10 2024-11-21T00:28:08,947 DEBUG [RS:0;5ed4808ef0e6:45849 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, exclude list is [], retry=0 2024-11-21T00:28:08,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7ce8dff-c4a9-4000-97b7-4c81619a63a4,DISK] 2024-11-21T00:28:08,956 INFO [RS:0;5ed4808ef0e6:45849 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 2024-11-21T00:28:08,974 DEBUG [RS:0;5ed4808ef0e6:45849 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46429:46429)] 2024-11-21T00:28:09,117 DEBUG [5ed4808ef0e6:35935 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:28:09,118 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:09,119 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,45849,1732148887589, state=OPENING 2024-11-21T00:28:09,145 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:28:09,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:09,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:09,232 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /21019767428/meta-region-server: CHANGED 2024-11-21T00:28:09,232 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:09,232 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45849,1732148887589}] 2024-11-21T00:28:09,233 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /21019767428/meta-region-server: CHANGED 2024-11-21T00:28:09,387 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:09,388 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38693, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:28:09,400 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:28:09,401 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:09,401 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:28:09,402 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45849%2C1732148887589.meta, suffix=.meta, logDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589, archiveDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/oldWALs, maxLogs=10 2024-11-21T00:28:09,425 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.meta.1732148889402.meta, exclude list is [], retry=0 2024-11-21T00:28:09,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7ce8dff-c4a9-4000-97b7-4c81619a63a4,DISK] 2024-11-21T00:28:09,436 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.meta.1732148889402.meta 2024-11-21T00:28:09,436 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46429:46429)] 2024-11-21T00:28:09,437 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:09,437 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
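Note: the entries above record the AsyncFSWAL provider being set up for both the default WAL and the hbase:meta WAL with "blocksize=20 KB, rollsize=10 KB" and "maxLogs=10". A minimal sketch of the kind of configuration that yields those numbers, assuming the stock HBase keys; the test's actual site configuration is not shown in this log, so the values below are inferred, not copied.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfSketch {
      public static Configuration walConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs");                     // AsyncFSWALProvider, as instantiated above
        conf.setLong("hbase.regionserver.hlog.blocksize", 20 * 1024);  // WAL block size -> "blocksize=20 KB"
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);  // roll at half a block -> "rollsize=10 KB"
        conf.setInt("hbase.regionserver.maxlogs", 10);                 // -> "maxLogs=10"
        return conf;
      }
    }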
2024-11-21T00:28:09,437 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:09,437 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:28:09,437 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:28:09,437 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:28:09,437 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:09,438 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:28:09,438 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:28:09,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:09,440 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:09,440 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:09,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:09,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:09,444 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:09,444 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:09,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:09,445 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:09,446 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:09,446 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:09,450 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:09,451 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:09,455 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:09,455 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:09,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:09,455 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:09,456 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740 2024-11-21T00:28:09,457 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740 2024-11-21T00:28:09,457 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:09,458 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:09,458 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
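Note: the CompactionConfiguration entries above spell out the effective compaction tuning for each store of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB, major period 604800000 ms, jitter 0.5). A sketch of the standard keys those values correspond to; these look like stock defaults rather than test overrides, so treat the mapping as an assumption drawn from vanilla HBase.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfSketch {
      public static Configuration compactionConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize 128 MB
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period, 7 days in ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
        return conf;
      }
    }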
2024-11-21T00:28:09,461 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:09,465 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72851387, jitterRate=0.08557026088237762}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:09,465 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:28:09,465 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148889438Writing region info on filesystem at 1732148889438Initializing all the Stores at 1732148889439 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148889439Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148889439Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148889439Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148889439Cleaning up temporary data from old regions at 1732148889458 (+19 ms)Running coprocessor post-open hooks at 1732148889465 (+7 ms)Region opened successfully at 1732148889465 2024-11-21T00:28:09,468 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148889386 2024-11-21T00:28:09,471 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:09,473 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,45849,1732148887589, state=OPEN 2024-11-21T00:28:09,474 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post 
open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:28:09,474 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:28:09,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/21019767428/meta-region-server 2024-11-21T00:28:09,503 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/21019767428/meta-region-server 2024-11-21T00:28:09,509 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:09,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /21019767428/meta-region-server: CHANGED 2024-11-21T00:28:09,510 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /21019767428/meta-region-server: CHANGED 2024-11-21T00:28:09,515 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:28:09,515 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45849,1732148887589 in 277 msec 2024-11-21T00:28:09,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:28:09,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 800 msec 2024-11-21T00:28:09,520 DEBUG [PEWorker-5 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:09,520 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:28:09,523 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:09,524 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45849,1732148887589, seqNum=-1] 2024-11-21T00:28:09,525 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:09,526 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38021, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:09,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0770 sec 2024-11-21T00:28:09,537 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148889537, completionTime=-1 2024-11-21T00:28:09,537 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:28:09,537 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148949540 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732149009540 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35935,1732148887414-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35935,1732148887414-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35935,1732148887414-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:09,540 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:35935, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:09,541 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:09,543 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:28:09,548 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:09,552 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.743sec 2024-11-21T00:28:09,553 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:28:09,553 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:28:09,553 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:28:09,553 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
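Note: the ChoreService entries above show the master scheduling its periodic chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, and so on) once initialization completes. A hedged sketch of that ScheduledChore/ChoreService pattern; the constructor shapes below follow stock HBase but are internal APIs and may differ between versions, and the chore body is a placeholder rather than any of the chores named in the log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void schedule() {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(new ScheduledChore("ExampleChore", stopper, 60_000) {
          @Override protected void chore() {
            // periodic work (a balancer pass, a catalog scan, ...) would run here every 60 s
          }
        });
      }
    }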
2024-11-21T00:28:09,553 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:28:09,553 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35935,1732148887414-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:09,553 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35935,1732148887414-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:28:09,572 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:28:09,572 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:28:09,572 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35935,1732148887414-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:09,583 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@672e323b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:09,583 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35935,-1 for getting cluster id 2024-11-21T00:28:09,583 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:09,593 DEBUG [HMaster-EventLoopGroup-41-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '727cbd73-2263-4ee7-a05f-23c795f85fd6' 2024-11-21T00:28:09,593 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:09,593 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "727cbd73-2263-4ee7-a05f-23c795f85fd6" 2024-11-21T00:28:09,593 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d138b9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:09,593 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35935,-1] 2024-11-21T00:28:09,594 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:09,594 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:09,595 INFO [HMaster-EventLoopGroup-41-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54170, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:09,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7edd9bad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:09,596 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:09,598 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45849,1732148887589, seqNum=-1] 2024-11-21T00:28:09,598 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:09,601 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41830, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:09,603 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:09,603 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster2 connecting to ZooKeeper ensemble=127.0.0.1:60103 2024-11-21T00:28:09,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster20x0, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:09,634 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:09,635 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42535,1732148876091 2024-11-21T00:28:09,635 DEBUG [Time-limited test {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@48dee05d 2024-11-21T00:28:09,635 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:09,637 INFO [HMaster-EventLoopGroup-37-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:09,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:09,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:28:09,644 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster2-0x1015aca43ea0008 connected 2024-11-21T00:28:09,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:09,644 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:09,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:28:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:09,648 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:09,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:28:09,729 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e9f5ea62361f185c33b911c4d081b3e0, NAME => 'test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5 2024-11-21T00:28:09,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:09,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:28:09,791 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:09,791 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing e9f5ea62361f185c33b911c4d081b3e0, disabling compactions & flushes 2024-11-21T00:28:09,792 INFO 
[RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:09,792 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:09,792 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. after waiting 0 ms 2024-11-21T00:28:09,792 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:09,792 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:09,792 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for e9f5ea62361f185c33b911c4d081b3e0: Waiting for close lock at 1732148889791Disabling compacts and flushes for region at 1732148889791Disabling writes for close at 1732148889792 (+1 ms)Writing region close event to WAL at 1732148889792Closed at 1732148889792 2024-11-21T00:28:09,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:09,793 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148889793"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148889793"}]},"ts":"1732148889793"} 2024-11-21T00:28:09,795 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
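Note: the HMaster$4(2454) entry above logs the incoming "create 'test'" request with three column families: 'f' and 'f1' replicated (REPLICATION_SCOPE => '1', VERSIONS => '1') and 'norep' left unreplicated. A sketch of the client-side Admin call that produces a request of that shape; the connection setup is assumed, while the table name and family layout are taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void createTestTable(Configuration conf) throws Exception {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
                .setMaxVersions(1)
                .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)   // REPLICATION_SCOPE => '1'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
                .setMaxVersions(1)
                .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
                .setMaxVersions(1)                               // scope left at 0: not replicated
                .build())
            .build();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(td);                                 // drives the CreateTableProcedure logged above
        }
      }
    }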
2024-11-21T00:28:09,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:09,796 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148889796"}]},"ts":"1732148889796"} 2024-11-21T00:28:09,798 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:28:09,798 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=e9f5ea62361f185c33b911c4d081b3e0, ASSIGN}] 2024-11-21T00:28:09,799 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=e9f5ea62361f185c33b911c4d081b3e0, ASSIGN 2024-11-21T00:28:09,800 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=e9f5ea62361f185c33b911c4d081b3e0, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,45749,1732148876424; forceNewPlan=false, retain=false 2024-11-21T00:28:09,951 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e9f5ea62361f185c33b911c4d081b3e0, regionState=OPENING, regionLocation=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:28:09,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=e9f5ea62361f185c33b911c4d081b3e0, ASSIGN because future has completed 2024-11-21T00:28:09,954 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9f5ea62361f185c33b911c4d081b3e0, server=5ed4808ef0e6,45749,1732148876424}] 2024-11-21T00:28:09,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:10,036 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:10,039 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,040 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,186 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,188 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:10,188 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e9f5ea62361f185c33b911c4d081b3e0, NAME => 'test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:10,189 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:10,189 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 2024-11-21T00:28:10,189 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,189 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:10,189 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,189 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,211 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,213 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9f5ea62361f185c33b911c4d081b3e0 columnFamilyName f 2024-11-21T00:28:10,213 DEBUG [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:10,213 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] regionserver.HStore(327): Store=e9f5ea62361f185c33b911c4d081b3e0/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:10,214 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,215 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9f5ea62361f185c33b911c4d081b3e0 columnFamilyName f1 2024-11-21T00:28:10,215 DEBUG [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:10,215 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] regionserver.HStore(327): Store=e9f5ea62361f185c33b911c4d081b3e0/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:10,216 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,218 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e9f5ea62361f185c33b911c4d081b3e0 columnFamilyName norep 2024-11-21T00:28:10,218 DEBUG [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:10,218 INFO [StoreOpener-e9f5ea62361f185c33b911c4d081b3e0-1 {}] regionserver.HStore(327): Store=e9f5ea62361f185c33b911c4d081b3e0/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:10,219 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,219 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:10,219 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,219 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,222 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,222 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,223 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
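Note: the FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the 'test' descriptor, so the region falls back to memstore-flush-size divided by the number of families (42.7 M). A sketch of how that bound could be supplied explicitly as a table attribute; only the key name comes from the log, and the 16 MB value is an arbitrary illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushBoundSketch {
      public static TableDescriptor withExplicitFlushBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            // explicit per-column-family flush lower bound (16 MB); without it,
            // FlushLargeStoresPolicy computes the fallback seen in the log
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }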
2024-11-21T00:28:10,228 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,240 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:10,241 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e9f5ea62361f185c33b911c4d081b3e0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71175327, jitterRate=0.06059502065181732}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:10,241 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:10,241 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e9f5ea62361f185c33b911c4d081b3e0: Running coprocessor pre-open hook at 1732148890189Writing region info on filesystem at 1732148890189Initializing all the Stores at 1732148890201 (+12 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148890201Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148890209 (+8 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148890209Cleaning up temporary data from old regions at 1732148890222 (+13 ms)Running coprocessor post-open hooks at 1732148890241 (+19 ms)Region opened successfully at 1732148890241 2024-11-21T00:28:10,242 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0., pid=6, masterSystemTime=1732148890112 2024-11-21T00:28:10,246 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:10,246 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 
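Note: with the 'test' region now open ("Opened test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0." above), clients can write to it. An illustrative put into the replicated family 'f' followed by a read back; the row, qualifier, and value are made up, and only the table and family names come from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutGetSketch {
      public static byte[] writeAndRead(Configuration conf) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("test"))) {
          table.put(new Put(Bytes.toBytes("row1"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
          Result r = table.get(new Get(Bytes.toBytes("row1")));
          return r.getValue(Bytes.toBytes("f"), Bytes.toBytes("q"));  // returns "v1"
        }
      }
    }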
2024-11-21T00:28:10,249 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e9f5ea62361f185c33b911c4d081b3e0, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:28:10,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e9f5ea62361f185c33b911c4d081b3e0, server=5ed4808ef0e6,45749,1732148876424 because future has completed 2024-11-21T00:28:10,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:28:10,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e9f5ea62361f185c33b911c4d081b3e0, server=5ed4808ef0e6,45749,1732148876424 in 314 msec 2024-11-21T00:28:10,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:10,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:28:10,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=e9f5ea62361f185c33b911c4d081b3e0, ASSIGN in 480 msec 2024-11-21T00:28:10,282 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:10,282 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148890282"}]},"ts":"1732148890282"} 2024-11-21T00:28:10,291 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:28:10,292 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:10,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 655 msec 2024-11-21T00:28:10,316 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:28:10,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:10,788 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:28:10,788 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:10,789 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:10,789 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@528102d8 2024-11-21T00:28:10,789 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-11-21T00:28:10,791 INFO [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37530, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:10,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:10,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:28:10,796 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:10,796 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:10,797 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:10,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:28:10,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:10,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:28:10,829 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ea4ec3b5aa2624682520b354168ef297, NAME => 'test,,1732148890791.ea4ec3b5aa2624682520b354168ef297.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a 2024-11-21T00:28:10,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:28:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:11,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:11,278 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148890791.ea4ec3b5aa2624682520b354168ef297.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:11,278 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing ea4ec3b5aa2624682520b354168ef297, disabling compactions & flushes 2024-11-21T00:28:11,278 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:11,278 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:11,278 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. after waiting 0 ms 2024-11-21T00:28:11,278 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:11,278 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:11,278 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for ea4ec3b5aa2624682520b354168ef297: Waiting for close lock at 1732148891278Disabling compacts and flushes for region at 1732148891278Disabling writes for close at 1732148891278Writing region close event to WAL at 1732148891278Closed at 1732148891278 2024-11-21T00:28:11,280 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:11,280 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148890791.ea4ec3b5aa2624682520b354168ef297.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148891280"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148891280"}]},"ts":"1732148891280"} 2024-11-21T00:28:11,282 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
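
The entries above trace the master-side CreateTableProcedure for 'test' (pid=4): pre-operation, writing the filesystem layout, initializing and closing the bootstrap region, then adding it to hbase:meta, while the client keeps polling "Checking to see if procedure is done pid=4". As an illustration only (not the call the test harness itself makes), an equivalent request could be issued through the public Admin API roughly as sketched below, assuming an hbase-site.xml for the target cluster is on the classpath. The sketch mirrors the descriptor in the log: families f and f1 with REPLICATION_SCOPE 1, norep with scope 0, one version each.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster config on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"));
      // 'f' and 'f1' are replicated (REPLICATION_SCOPE => '1') ...
      for (String cf : new String[] { "f", "f1" }) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
            .setMaxVersions(1)
            .setScope(1) // replicate edits for this family
            .build());
      }
      // ... while 'norep' stays local (REPLICATION_SCOPE => '0').
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
          .setMaxVersions(1)
          .setScope(0)
          .build());
      admin.createTable(table.build()); // returns once the CreateTableProcedure completes
    }
  }
}

The synchronous createTable call only returns once the procedure reports SUCCESS, which in the log surfaces later as "Finished pid=4, state=SUCCESS ... CreateTableProcedure table=test".
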
2024-11-21T00:28:11,282 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:11,283 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148891283"}]},"ts":"1732148891283"} 2024-11-21T00:28:11,286 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:28:11,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=ea4ec3b5aa2624682520b354168ef297, ASSIGN}] 2024-11-21T00:28:11,288 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=ea4ec3b5aa2624682520b354168ef297, ASSIGN 2024-11-21T00:28:11,289 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=ea4ec3b5aa2624682520b354168ef297, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,43643,1732148882416; forceNewPlan=false, retain=false 2024-11-21T00:28:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:11,444 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea4ec3b5aa2624682520b354168ef297, regionState=OPENING, regionLocation=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:11,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=ea4ec3b5aa2624682520b354168ef297, ASSIGN because future has completed 2024-11-21T00:28:11,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea4ec3b5aa2624682520b354168ef297, server=5ed4808ef0e6,43643,1732148882416}] 2024-11-21T00:28:11,643 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:11,643 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ea4ec3b5aa2624682520b354168ef297, NAME => 'test,,1732148890791.ea4ec3b5aa2624682520b354168ef297.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:11,643 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:11,643 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
2024-11-21T00:28:11,643 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,644 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148890791.ea4ec3b5aa2624682520b354168ef297.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:11,644 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,644 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,648 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,650 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea4ec3b5aa2624682520b354168ef297 columnFamilyName f 2024-11-21T00:28:11,651 DEBUG [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:11,651 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] regionserver.HStore(327): Store=ea4ec3b5aa2624682520b354168ef297/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:11,656 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,657 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea4ec3b5aa2624682520b354168ef297 columnFamilyName f1 2024-11-21T00:28:11,657 DEBUG [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:11,661 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] regionserver.HStore(327): Store=ea4ec3b5aa2624682520b354168ef297/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:11,661 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,666 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ea4ec3b5aa2624682520b354168ef297 columnFamilyName norep 2024-11-21T00:28:11,666 DEBUG [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:11,669 INFO [StoreOpener-ea4ec3b5aa2624682520b354168ef297-1 {}] regionserver.HStore(327): Store=ea4ec3b5aa2624682520b354168ef297/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:11,669 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,670 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,670 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,673 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,673 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,673 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:28:11,675 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,679 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:11,679 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ea4ec3b5aa2624682520b354168ef297; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63558154, jitterRate=-0.05290970206260681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:11,679 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:11,679 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ea4ec3b5aa2624682520b354168ef297: Running coprocessor pre-open hook at 1732148891644Writing region info on filesystem at 1732148891644Initializing all the Stores at 1732148891646 (+2 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148891646Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148891648 (+2 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148891648Cleaning up temporary data from old regions at 1732148891673 (+25 ms)Running coprocessor post-open hooks at 1732148891679 (+6 ms)Region opened successfully at 1732148891679 2024-11-21T00:28:11,680 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148890791.ea4ec3b5aa2624682520b354168ef297., pid=6, 
masterSystemTime=1732148891616 2024-11-21T00:28:11,683 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:11,683 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:11,683 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ea4ec3b5aa2624682520b354168ef297, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:11,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ea4ec3b5aa2624682520b354168ef297, server=5ed4808ef0e6,43643,1732148882416 because future has completed 2024-11-21T00:28:11,700 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:28:11,701 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ea4ec3b5aa2624682520b354168ef297, server=5ed4808ef0e6,43643,1732148882416 in 243 msec 2024-11-21T00:28:11,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:28:11,712 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=ea4ec3b5aa2624682520b354168ef297, ASSIGN in 414 msec 2024-11-21T00:28:11,714 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:11,714 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148891714"}]},"ts":"1732148891714"} 2024-11-21T00:28:11,717 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:28:11,718 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:11,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 928 msec 2024-11-21T00:28:11,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:11,938 INFO [RPCClient-NioEventLoopGroup-4-5 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:28:11,938 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:11,940 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:11,940 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3486a0e6 2024-11-21T00:28:11,941 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:11,944 INFO [HMaster-EventLoopGroup-41-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45608, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:11,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:11,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:28:11,947 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:11,947 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:11,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:28:11,948 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:11,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:12,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:28:12,017 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ef3b8661a5d0aea3ac56d3688db20d05, NAME => 'test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE 
=> '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d 2024-11-21T00:28:12,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:28:12,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:12,058 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:12,058 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing ef3b8661a5d0aea3ac56d3688db20d05, disabling compactions & flushes 2024-11-21T00:28:12,058 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:12,058 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:12,058 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. after waiting 0 ms 2024-11-21T00:28:12,058 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:12,058 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:12,059 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for ef3b8661a5d0aea3ac56d3688db20d05: Waiting for close lock at 1732148892058Disabling compacts and flushes for region at 1732148892058Disabling writes for close at 1732148892058Writing region close event to WAL at 1732148892058Closed at 1732148892058 2024-11-21T00:28:12,060 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:12,060 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148892060"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148892060"}]},"ts":"1732148892060"} 2024-11-21T00:28:12,062 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-21T00:28:12,063 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:12,063 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148892063"}]},"ts":"1732148892063"} 2024-11-21T00:28:12,065 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:28:12,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=ef3b8661a5d0aea3ac56d3688db20d05, ASSIGN}] 2024-11-21T00:28:12,068 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=ef3b8661a5d0aea3ac56d3688db20d05, ASSIGN 2024-11-21T00:28:12,069 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=ef3b8661a5d0aea3ac56d3688db20d05, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,45849,1732148887589; forceNewPlan=false, retain=false 2024-11-21T00:28:12,220 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ef3b8661a5d0aea3ac56d3688db20d05, regionState=OPENING, regionLocation=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:12,223 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=ef3b8661a5d0aea3ac56d3688db20d05, ASSIGN because future has completed 2024-11-21T00:28:12,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef3b8661a5d0aea3ac56d3688db20d05, server=5ed4808ef0e6,45849,1732148887589}] 2024-11-21T00:28:12,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:12,430 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:12,430 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ef3b8661a5d0aea3ac56d3688db20d05, NAME => 'test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:12,431 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:12,431 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
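
Above, TransitRegionStateProcedure (pid=5) picks a target server, moves the region to OPENING in hbase:meta, and dispatches an OpenRegionProcedure (pid=6) to the chosen region server. As a hypothetical client-side check, not something this test performs, the resulting location of the single region of 'test' could be read back with a RegionLocator, assuming an open Connection to the cluster under test:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

class RegionLocationCheck {
  // Prints encoded region name and hosting server for every region of 'test'.
  static void printRegionLocations(Connection conn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("test"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Per the log above, this would report something like
        // ef3b8661a5d0aea3ac56d3688db20d05 -> 5ed4808ef0e6,45849,1732148887589
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
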
2024-11-21T00:28:12,431 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,431 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:12,431 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,431 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,452 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,454 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef3b8661a5d0aea3ac56d3688db20d05 columnFamilyName f 2024-11-21T00:28:12,454 DEBUG [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:12,454 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] regionserver.HStore(327): Store=ef3b8661a5d0aea3ac56d3688db20d05/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:12,454 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,455 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef3b8661a5d0aea3ac56d3688db20d05 columnFamilyName f1 2024-11-21T00:28:12,455 DEBUG [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:12,456 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] regionserver.HStore(327): Store=ef3b8661a5d0aea3ac56d3688db20d05/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:12,456 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,457 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ef3b8661a5d0aea3ac56d3688db20d05 columnFamilyName norep 2024-11-21T00:28:12,457 DEBUG [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:12,457 INFO [StoreOpener-ef3b8661a5d0aea3ac56d3688db20d05-1 {}] regionserver.HStore(327): Store=ef3b8661a5d0aea3ac56d3688db20d05/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:12,457 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,458 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,458 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,459 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,459 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,459 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:28:12,460 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,468 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:12,469 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ef3b8661a5d0aea3ac56d3688db20d05; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71318641, jitterRate=0.0627305656671524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:12,469 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:12,469 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ef3b8661a5d0aea3ac56d3688db20d05: Running coprocessor pre-open hook at 1732148892431Writing region info on filesystem at 1732148892431Initializing all the Stores at 1732148892432 (+1 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148892432Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148892452 (+20 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148892452Cleaning up temporary data from old regions at 1732148892459 (+7 ms)Running coprocessor post-open hooks at 1732148892469 (+10 ms)Region opened successfully at 1732148892469 2024-11-21T00:28:12,472 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05., pid=6, 
masterSystemTime=1732148892400 2024-11-21T00:28:12,485 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:12,485 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:12,489 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ef3b8661a5d0aea3ac56d3688db20d05, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:12,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ef3b8661a5d0aea3ac56d3688db20d05, server=5ed4808ef0e6,45849,1732148887589 because future has completed 2024-11-21T00:28:12,528 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:28:12,529 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ef3b8661a5d0aea3ac56d3688db20d05, server=5ed4808ef0e6,45849,1732148887589 in 288 msec 2024-11-21T00:28:12,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:28:12,541 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=ef3b8661a5d0aea3ac56d3688db20d05, ASSIGN in 464 msec 2024-11-21T00:28:12,544 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:12,545 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148892545"}]},"ts":"1732148892545"} 2024-11-21T00:28:12,547 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:28:12,548 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:12,557 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 603 msec 2024-11-21T00:28:12,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:12,579 INFO [RPCClient-NioEventLoopGroup-4-6 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:28:12,588 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b12c030, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:12,588 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42535,-1 for getting cluster 
id 2024-11-21T00:28:12,589 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:12,594 DEBUG [HMaster-EventLoopGroup-37-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f531fde5-6173-43d1-970f-150eb244b957' 2024-11-21T00:28:12,594 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:12,594 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f531fde5-6173-43d1-970f-150eb244b957" 2024-11-21T00:28:12,594 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23bd94b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:12,594 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42535,-1] 2024-11-21T00:28:12,595 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:12,595 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:12,596 INFO [HMaster-EventLoopGroup-37-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46370, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:12,596 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f068226, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:12,597 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:12,598 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42535,1732148876091 2024-11-21T00:28:12,598 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@274f282d 2024-11-21T00:28:12,599 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:12,599 INFO [HMaster-EventLoopGroup-37-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46374, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:12,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:38567,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:28:12,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:28:12,606 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:12,610 DEBUG [PEWorker-1 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:38567' 2024-11-21T00:28:12,616 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17930c4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:12,616 DEBUG [PEWorker-1 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:12,617 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:12,620 DEBUG [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:12,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:12,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:12,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3018e316, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:12,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:12,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:12,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:12,622 INFO [HMaster-EventLoopGroup-39-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50200, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:12,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d977afd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:12,623 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:12,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:12,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@45f5468d 2024-11-21T00:28:12,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=MasterService, sasl=false 2024-11-21T00:28:12,625 INFO [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50212, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:12,625 INFO [PEWorker-1 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-1. 2024-11-21T00:28:12,625 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:28:12,625 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:12,625 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:12,626 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
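
The AddPeerProcedure entries above (pid=7) show the master validating the requested cluster key, clusterKey=hbase+rpc://5ed4808ef0e6:38567, by briefly opening and then closing a connection to the peer cluster before persisting the peer. A hedged sketch of the kind of client call that triggers this path is below; the peer id and URI-style cluster key are copied from the log, and the example assumes a build whose connection registry accepts hbase+rpc:// keys, as this 3.0.0-beta-2-SNAPSHOT build evidently does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // source-cluster configuration
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://5ed4808ef0e6:38567") // peer cluster key from the log
          .setReplicateAllUserTables(true)                 // matches replicateAllUserTables=true above
          .build();
      admin.addReplicationPeer("1", peer); // peer id "1", created ENABLED by default
    }
  }
}
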
2024-11-21T00:28:12,626 INFO [PEWorker-1 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:12,627 DEBUG [PEWorker-1 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:28:12,628 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:12,628 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:12,629 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:12,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:28:12,664 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 75a2a836d409c649a5c103e0b1258bf3, NAME => 'hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5 2024-11-21T00:28:12,678 DEBUG [PEWorker-1 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:28:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:28:12,703 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:12,703 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 75a2a836d409c649a5c103e0b1258bf3, disabling compactions & flushes 2024-11-21T00:28:12,703 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:12,703 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:12,703 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. after waiting 0 ms 2024-11-21T00:28:12,703 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:12,703 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:12,703 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 75a2a836d409c649a5c103e0b1258bf3: Waiting for close lock at 1732148892703Disabling compacts and flushes for region at 1732148892703Disabling writes for close at 1732148892703Writing region close event to WAL at 1732148892703Closed at 1732148892703 2024-11-21T00:28:12,704 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:12,704 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148892704"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148892704"}]},"ts":"1732148892704"} 2024-11-21T00:28:12,707 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
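Annotation: the entries above show the master materializing the hbase:replication table: the full descriptor dump (three column families hfileref, queue and sid with a single version each, the MultiRowMutationEndpoint coprocessor, a DelimitedKeyPrefix split restriction), the CreateTableProcedure walking through CREATE_TABLE_PRE_OPERATION / WRITE_FS_LAYOUT / ADD_TO_META, and the regioninfo row landing in hbase:meta. For reference, a minimal sketch of building a comparable descriptor through the public client API; the table name, connection setup and default coprocessor priority are illustrative assumptions, not taken from the test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();      // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("replication_like"))  // hypothetical table; the real one is hbase:replication
              .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
              .setValue("hbase.regionserver.region.split_restriction.type", "DelimitedKeyPrefix")
              .setValue("hbase.regionserver.region.split_restriction.delimiter", "-")
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("hfileref"))
                  .setMaxVersions(1).build())                    // other attributes in the dump are defaults
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("queue"))
                  .setMaxVersions(1).build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("sid"))
                  .setMaxVersions(1).build())
              .build();
          admin.createTable(desc);   // the master then runs a CreateTableProcedure like the one logged above
        }
      }
    }

In the log this table is created internally by the master while adding a replication peer, not by a client; the sketch only mirrors the attributes printed in the descriptor dump.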
2024-11-21T00:28:12,708 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:12,708 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148892708"}]},"ts":"1732148892708"} 2024-11-21T00:28:12,710 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:28:12,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=75a2a836d409c649a5c103e0b1258bf3, ASSIGN}] 2024-11-21T00:28:12,713 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=75a2a836d409c649a5c103e0b1258bf3, ASSIGN 2024-11-21T00:28:12,714 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=75a2a836d409c649a5c103e0b1258bf3, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,45749,1732148876424; forceNewPlan=false, retain=false 2024-11-21T00:28:12,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:12,865 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=75a2a836d409c649a5c103e0b1258bf3, regionState=OPENING, regionLocation=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:28:12,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=75a2a836d409c649a5c103e0b1258bf3, ASSIGN because future has completed 2024-11-21T00:28:12,868 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 75a2a836d409c649a5c103e0b1258bf3, server=5ed4808ef0e6,45749,1732148876424}] 2024-11-21T00:28:12,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:13,041 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 
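Annotation: the assignment entries above move the new region from OFFLINE to OPENING on 5ed4808ef0e6,45749,1732148876424 via a TransitRegionStateProcedure and its child OpenRegionProcedure. A client can observe the resulting placement with a RegionLocator; the following is a small sketch assuming a default client configuration, not code from the test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase", "replication"))) {
          // Ask the client where each region of the table is currently hosted.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName()  // e.g. 75a2a836d409c649a5c103e0b1258bf3 in this log
                + " -> " + loc.getServerName());                 // e.g. 5ed4808ef0e6,45749,1732148876424
          }
        }
      }
    }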
2024-11-21T00:28:13,041 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:13,041 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:28:13,042 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45749%2C1732148876424.rep, suffix=, logDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424, archiveDir=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/oldWALs, maxLogs=10 2024-11-21T00:28:13,080 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.rep.1732148893043, exclude list is [], retry=0 2024-11-21T00:28:13,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42735,DS-8b268792-34c9-45e7-a4c2-f58b45d879c0,DISK] 2024-11-21T00:28:13,124 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.rep.1732148893043 2024-11-21T00:28:13,152 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45503:45503)] 2024-11-21T00:28:13,152 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 75a2a836d409c649a5c103e0b1258bf3, NAME => 'hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:13,153 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:13,153 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:13,153 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 
service=MultiRowMutationService 2024-11-21T00:28:13,153 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:28:13,153 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,153 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:13,153 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,153 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,176 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,178 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75a2a836d409c649a5c103e0b1258bf3 columnFamilyName hfileref 2024-11-21T00:28:13,178 DEBUG [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:13,178 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] regionserver.HStore(327): Store=75a2a836d409c649a5c103e0b1258bf3/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:13,178 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,179 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 
{}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75a2a836d409c649a5c103e0b1258bf3 columnFamilyName queue 2024-11-21T00:28:13,179 DEBUG [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:13,179 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] regionserver.HStore(327): Store=75a2a836d409c649a5c103e0b1258bf3/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:13,180 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,180 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75a2a836d409c649a5c103e0b1258bf3 columnFamilyName sid 2024-11-21T00:28:13,181 DEBUG [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:13,181 INFO [StoreOpener-75a2a836d409c649a5c103e0b1258bf3-1 {}] regionserver.HStore(327): Store=75a2a836d409c649a5c103e0b1258bf3/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:13,181 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,182 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,182 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,183 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,183 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,183 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:28:13,184 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,192 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:13,197 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 75a2a836d409c649a5c103e0b1258bf3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62809507, jitterRate=-0.06406541168689728}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:13,198 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:13,198 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 75a2a836d409c649a5c103e0b1258bf3: Running coprocessor pre-open hook at 1732148893154Writing region info on filesystem at 1732148893154Initializing all the Stores at 1732148893154Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148893154Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148893176 (+22 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', 
VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148893176Cleaning up temporary data from old regions at 1732148893183 (+7 ms)Running coprocessor post-open hooks at 1732148893198 (+15 ms)Region opened successfully at 1732148893198 2024-11-21T00:28:13,200 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3., pid=10, masterSystemTime=1732148893025 2024-11-21T00:28:13,203 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=75a2a836d409c649a5c103e0b1258bf3, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,45749,1732148876424 2024-11-21T00:28:13,204 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:13,204 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:13,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 75a2a836d409c649a5c103e0b1258bf3, server=5ed4808ef0e6,45749,1732148876424 because future has completed 2024-11-21T00:28:13,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:28:13,222 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 75a2a836d409c649a5c103e0b1258bf3, server=5ed4808ef0e6,45749,1732148876424 in 352 msec 2024-11-21T00:28:13,224 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:28:13,224 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=75a2a836d409c649a5c103e0b1258bf3, ASSIGN in 512 msec 2024-11-21T00:28:13,225 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:13,225 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148893225"}]},"ts":"1732148893225"} 2024-11-21T00:28:13,230 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:28:13,230 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:13,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; 
CreateTableProcedure table=hbase:replication in 605 msec 2024-11-21T00:28:13,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3., hostname=5ed4808ef0e6,45749,1732148876424, seqNum=2] 2024-11-21T00:28:13,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:13,436 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:28:13,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:28:13,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:28:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:13,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45749 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:28:13,784 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:28:13,873 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,45749,1732148876424, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:28:13,874 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:13,874 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45749,1732148876424, seqNum=-1] 2024-11-21T00:28:13,875 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:13,876 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-38-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52591, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.17 (auth:SIMPLE), service=ClientService 2024-11-21T00:28:13,878 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-38-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,45749,1732148876424', locateType=CURRENT is [region=hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3., hostname=5ed4808ef0e6,45749,1732148876424, seqNum=2] 2024-11-21T00:28:14,221 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): 
NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:28:14,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:14,945 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:14,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:14,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:14,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:14,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,013 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,014 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,045 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:15,068 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:28:16,317 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:28:16,453 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1049586934_20 at /127.0.0.1:41058 [Receiving block BP-177379055-172.17.0.2-1732148873310:blk_1073741839_1015] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2574ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/data/data1/, blockId=1073741839, seqno=2 2024-11-21T00:28:16,454 INFO [AsyncFSWAL-0-hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5-prefix:5ed4808ef0e6,45749,1732148876424.rep {}] wal.AbstractFSWAL(1368): Slow sync cost: 2574 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42735,DS-8b268792-34c9-45e7-a4c2-f58b45d879c0,DISK]] 2024-11-21T00:28:16,454 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:28:16,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:28:16,459 INFO [PEWorker-2 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,45749,1732148876424 suceeded 2024-11-21T00:28:16,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:28:16,462 INFO [PEWorker-4 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:38567,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:28:16,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 2.8480 sec 2024-11-21T00:28:16,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 3.8620 sec 2024-11-21T00:28:16,480 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:38567' 2024-11-21T00:28:16,482 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@9c42b04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
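Annotation: the procedure entries above complete the peer addition: the RefreshPeerProcedure (pid=11) reports success on the region server and the AddPeerProcedure (pid=7) finishes with "Successfully added ENABLED peer 1" for clusterKey=hbase+rpc://5ed4808ef0e6:38567. A hedged sketch of issuing the same kind of request through the Admin API follows; the peer id and cluster key are placeholders, and the hbase+rpc:// form assumes the 3.0-style connection URIs seen in this log (older releases expect a ZooKeeper quorum cluster key instead).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class AddPeerSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Placeholder peer id and cluster key; the log uses peer "1" pointing at
          // hbase+rpc://5ed4808ef0e6:38567 on the sink cluster.
          ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
              .setClusterKey("hbase+rpc://sink-master.example.com:16000")
              .setReplicateAllUserTables(true)   // matches replicateAllUserTables=true in the logged config
              .setBandwidth(0)                   // 0 = unthrottled, matching bandwidth=0 in the log
              .build();
          admin.addReplicationPeer("1", peer, true);  // enabled=true -> state=ENABLED, as logged
        }
      }
    }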
2024-11-21T00:28:16,482 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:16,483 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:16,483 DEBUG [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:16,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:16,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:16,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@df8034, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:16,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:16,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:16,485 INFO [HMaster-EventLoopGroup-39-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50230, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.17 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:16,486 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@550f940c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,486 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:16,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:16,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3b5fd73 2024-11-21T00:28:16,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:16,488 INFO [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50238, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.17 (auth:SIMPLE), service=MasterService 
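Annotation: the ClusterIdFetcher / connection-registry entries above show the replication source bootstrapping a client connection from an RPC endpoint (5ed4808ef0e6,38567) rather than from ZooKeeper: it requests the cluster id over ConnectionRegistryService, then fetches the master stub. The sketch below configures a client the same way; the two configuration keys are my assumption about the RPC connection registry and should be verified against the HBase version in use.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcRegistrySketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys for the RPC-based connection registry: the client bootstraps from
        // master/region-server RPC endpoints and asks them for the cluster id and meta
        // locations, which is the exchange visible in the log entries above.
        conf.set("hbase.client.registry.impl", "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        conf.set("hbase.client.bootstrap.servers", "5ed4808ef0e6:38567");  // host:port from the log; normally your masters
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }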
2024-11-21T00:28:16,489 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,45749,1732148876424 (queues=1) is replicating from cluster=f531fde5-6173-43d1-970f-150eb244b957 to cluster=8d153efa-dd7f-4816-8e94-3c313345b6d9 2024-11-21T00:28:16,489 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C45749%2C1732148876424 2024-11-21T00:28:16,489 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,45749,1732148876424, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:28:16,496 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003, startPosition=0, beingWritten=true 2024-11-21T00:28:16,500 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C45749%2C1732148876424 2024-11-21T00:28:16,530 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:16,530 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 379, reset compression=false 2024-11-21T00:28:16,531 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45749,1732148876424 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:16,745 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 379, reset compression=false 2024-11-21T00:28:16,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42535 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:16,781 INFO [RPCClient-NioEventLoopGroup-4-9 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:28:16,781 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:28:16,781 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:418) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:16,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:16,782 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:28:16,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:16,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e9f8a3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,783 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:16,783 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:16,784 DEBUG [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:16,784 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:16,784 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:16,784 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23743f9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,784 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:16,786 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:16,787 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:16,787 INFO [HMaster-EventLoopGroup-39-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50256, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:16,788 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ade274b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,788 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:16,790 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:16,790 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@9529131 2024-11-21T00:28:16,790 DEBUG [RPCClient-NioEventLoopGroup-4-11 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:16,792 INFO [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50272, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:16,793 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:35935,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:28:16,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:28:16,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:16,797 DEBUG [PEWorker-5 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:35935' 2024-11-21T00:28:16,800 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ef9aab2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,800 DEBUG [PEWorker-5 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35935,-1 for getting cluster id 2024-11-21T00:28:16,801 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:16,807 DEBUG [HMaster-EventLoopGroup-41-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '727cbd73-2263-4ee7-a05f-23c795f85fd6' 2024-11-21T00:28:16,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:16,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "727cbd73-2263-4ee7-a05f-23c795f85fd6" 2024-11-21T00:28:16,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d3f079, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35935,-1] 2024-11-21T00:28:16,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:16,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:16,809 INFO [HMaster-EventLoopGroup-41-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45638, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:16,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@641254fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:16,810 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:16,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:16,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@71814976 2024-11-21T00:28:16,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:16,812 INFO [HMaster-EventLoopGroup-41-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45650, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:16,812 INFO [PEWorker-5 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-5. 2024-11-21T00:28:16,812 DEBUG [PEWorker-5 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:28:16,812 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:16,813 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:16,813 INFO [PEWorker-5 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', 
{TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:16,814 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:28:16,814 DEBUG [PEWorker-5 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:28:16,815 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:16,815 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:16,816 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:16,865 DEBUG [PEWorker-5 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:28:16,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:28:16,899 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d2567d2d5e06963377ad15ea0bba477d, NAME => 'hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a 2024-11-21T00:28:16,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:16,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:28:16,926 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:16,926 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing d2567d2d5e06963377ad15ea0bba477d, disabling compactions & flushes 2024-11-21T00:28:16,926 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:16,926 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:16,926 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. after waiting 0 ms 2024-11-21T00:28:16,926 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:16,926 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:16,927 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for d2567d2d5e06963377ad15ea0bba477d: Waiting for close lock at 1732148896926Disabling compacts and flushes for region at 1732148896926Disabling writes for close at 1732148896926Writing region close event to WAL at 1732148896926Closed at 1732148896926 2024-11-21T00:28:16,928 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:16,928 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148896928"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148896928"}]},"ts":"1732148896928"} 2024-11-21T00:28:16,934 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
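For reference, the 'hbase:replication' descriptor being created in the entries above (column families hfileref, queue and sid, the MultiRowMutationEndpoint coprocessor, and the DelimitedKeyPrefix split restriction) corresponds roughly to the following client-side sketch. This is illustrative only: the master creates this system table internally during AddPeerProcedure, and the builder calls below are an assumed equivalent using the public TableDescriptorBuilder API, not the code path shown in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class ReplicationTableDescriptorSketch {
  // Mirrors the attributes logged for 'hbase:replication'; values not set here
  // (BLOOMFILTER, BLOCKSIZE, TTL, ...) are the HBase defaults shown in the log.
  static TableDescriptor buildSketch() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "replication"))
            // The log also shows coprocessor$1 =>
            // '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|',
            // which the master registers on the descriptor as well.
            .setValue("hbase.regionserver.region.split_restriction.type", "DelimitedKeyPrefix")
            .setValue("hbase.regionserver.region.split_restriction.delimiter", "-");
    for (String family : new String[] { "hfileref", "queue", "sid" }) {
      ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
          .build();
      builder.setColumnFamily(cfd);
    }
    return builder.build();
  }
}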
2024-11-21T00:28:16,935 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:16,936 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148896935"}]},"ts":"1732148896935"} 2024-11-21T00:28:16,938 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:28:16,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d2567d2d5e06963377ad15ea0bba477d, ASSIGN}] 2024-11-21T00:28:16,940 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d2567d2d5e06963377ad15ea0bba477d, ASSIGN 2024-11-21T00:28:16,941 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=d2567d2d5e06963377ad15ea0bba477d, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,43643,1732148882416; forceNewPlan=false, retain=false 2024-11-21T00:28:17,072 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 379, reset compression=false 2024-11-21T00:28:17,093 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d2567d2d5e06963377ad15ea0bba477d, regionState=OPENING, regionLocation=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:17,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=d2567d2d5e06963377ad15ea0bba477d, ASSIGN because future has completed 2024-11-21T00:28:17,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2567d2d5e06963377ad15ea0bba477d, server=5ed4808ef0e6,43643,1732148882416}] 2024-11-21T00:28:17,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:17,263 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 
2024-11-21T00:28:17,263 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:17,263 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:28:17,266 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C43643%2C1732148882416.rep, suffix=, logDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416, archiveDir=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/oldWALs, maxLogs=10 2024-11-21T00:28:17,285 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.rep.1732148897266, exclude list is [], retry=0 2024-11-21T00:28:17,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33609,DS-814fcd31-845d-4bd2-aac0-e6306a17174f,DISK] 2024-11-21T00:28:17,303 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.rep.1732148897266 2024-11-21T00:28:17,307 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37401:37401)] 2024-11-21T00:28:17,307 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => d2567d2d5e06963377ad15ea0bba477d, NAME => 'hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:17,307 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:17,307 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:17,307 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 
service=MultiRowMutationService 2024-11-21T00:28:17,308 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:28:17,308 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,308 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:17,308 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,308 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,310 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,313 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2567d2d5e06963377ad15ea0bba477d columnFamilyName hfileref 2024-11-21T00:28:17,313 DEBUG [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:17,314 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] regionserver.HStore(327): Store=d2567d2d5e06963377ad15ea0bba477d/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:17,314 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,315 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 
{}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2567d2d5e06963377ad15ea0bba477d columnFamilyName queue 2024-11-21T00:28:17,315 DEBUG [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:17,315 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] regionserver.HStore(327): Store=d2567d2d5e06963377ad15ea0bba477d/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:17,315 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,316 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d2567d2d5e06963377ad15ea0bba477d columnFamilyName sid 2024-11-21T00:28:17,316 DEBUG [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:17,317 INFO [StoreOpener-d2567d2d5e06963377ad15ea0bba477d-1 {}] regionserver.HStore(327): Store=d2567d2d5e06963377ad15ea0bba477d/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:17,317 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,318 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,318 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,319 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:28:17,320 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,329 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:17,329 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened d2567d2d5e06963377ad15ea0bba477d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75073425, jitterRate=0.11868120729923248}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:17,329 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:17,330 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for d2567d2d5e06963377ad15ea0bba477d: Running coprocessor pre-open hook at 1732148897308Writing region info on filesystem at 1732148897308Initializing all the Stores at 1732148897309 (+1 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148897309Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148897310 (+1 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148897310Cleaning up temporary data from old regions at 1732148897319 (+9 ms)Running coprocessor post-open hooks at 1732148897329 (+10 ms)Region opened successfully at 1732148897329 2024-11-21T00:28:17,330 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d., pid=10, masterSystemTime=1732148897254 2024-11-21T00:28:17,333 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:17,333 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:17,334 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=d2567d2d5e06963377ad15ea0bba477d, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:17,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure d2567d2d5e06963377ad15ea0bba477d, server=5ed4808ef0e6,43643,1732148882416 because future has completed 2024-11-21T00:28:17,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:28:17,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure d2567d2d5e06963377ad15ea0bba477d, server=5ed4808ef0e6,43643,1732148882416 in 247 msec 2024-11-21T00:28:17,354 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:28:17,354 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=d2567d2d5e06963377ad15ea0bba477d, ASSIGN in 413 msec 2024-11-21T00:28:17,355 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:17,355 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148897355"}]},"ts":"1732148897355"} 2024-11-21T00:28:17,356 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:28:17,359 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:17,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; 
CreateTableProcedure table=hbase:replication in 546 msec 2024-11-21T00:28:17,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-40-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d., hostname=5ed4808ef0e6,43643,1732148882416, seqNum=2] 2024-11-21T00:28:17,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:17,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:17,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:17,441 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:28:17,488 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 379, reset compression=false 2024-11-21T00:28:17,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43643 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:28:17,596 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:28:17,640 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,43643,1732148882416, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:28:17,642 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:17,642 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43643,1732148882416, seqNum=-1] 2024-11-21T00:28:17,643 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:17,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48825, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.18 (auth:SIMPLE), service=ClientService 2024-11-21T00:28:17,650 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.AsyncNonMetaRegionLocator(310): The fetched 
location of 'hbase:replication', row='1-5ed4808ef0e6,43643,1732148882416', locateType=CURRENT is [region=hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d., hostname=5ed4808ef0e6,43643,1732148882416, seqNum=2] 2024-11-21T00:28:17,661 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:28:17,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:28:17,664 INFO [PEWorker-3 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,43643,1732148882416 succeeded 2024-11-21T00:28:17,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:28:17,667 INFO [PEWorker-1 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:35935,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:28:17,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 223 msec 2024-11-21T00:28:17,669 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 874 msec 2024-11-21T00:28:17,689 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:35935' 2024-11-21T00:28:17,719 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@14085ee8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,719 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35935,-1 for getting cluster id 2024-11-21T00:28:17,719 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:17,728 DEBUG [HMaster-EventLoopGroup-41-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '727cbd73-2263-4ee7-a05f-23c795f85fd6' 2024-11-21T00:28:17,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:17,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "727cbd73-2263-4ee7-a05f-23c795f85fd6" 2024-11-21T00:28:17,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@62226703, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-21T00:28:17,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35935,-1] 2024-11-21T00:28:17,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:17,729 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:17,730 INFO [HMaster-EventLoopGroup-41-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45658, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.18 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:17,731 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@2dc0d7dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,731 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:17,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:17,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@77c1b681 2024-11-21T00:28:17,732 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:17,740 INFO [HMaster-EventLoopGroup-41-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45670, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.18 (auth:SIMPLE), service=MasterService 2024-11-21T00:28:17,741 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,43643,1732148882416 (queues=1) is replicating from cluster=8d153efa-dd7f-4816-8e94-3c313345b6d9 to cluster=727cbd73-2263-4ee7-a05f-23c795f85fd6 2024-11-21T00:28:17,741 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C43643%2C1732148882416 2024-11-21T00:28:17,741 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,43643,1732148882416, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:28:17,749 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(254): Creating new reader 
hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, startPosition=0, beingWritten=true 2024-11-21T00:28:17,761 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C43643%2C1732148882416 2024-11-21T00:28:17,798 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:17,798 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 379, reset compression=false 2024-11-21T00:28:17,798 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:17,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38567 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:17,938 INFO [RPCClient-NioEventLoopGroup-4-12 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:28:17,938 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:28:17,938 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:419) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:17,939 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:17,939 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:17,939 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
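The ADD_REPLICATION_PEER operation that completes above (peer id 1, clusterKey=hbase+rpc://5ed4808ef0e6:35935) is issued from the test through the standard Admin API. A minimal sketch of such a call is shown below; the configuration, cluster key value and peer id are assumptions taken from the log for illustration, and error handling is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public final class AddPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed to point at the source cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cluster key in the URI form shown in the log (hbase+rpc://<master-host>:<port>);
      // older releases use a ZooKeeper-quorum style key instead.
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://5ed4808ef0e6:35935")
          .setReplicateAllUserTables(true)
          .build();
      // Blocks until the master's AddPeerProcedure finishes, which is why the client
      // keeps polling "Checking to see if procedure is done" in the log above.
      admin.addReplicationPeer("1", peerConfig);
      admin.listReplicationPeers().forEach(
          p -> System.out.println(p.getPeerId() + " -> " + p.getPeerConfig().getClusterKey()));
    }
  }
}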
2024-11-21T00:28:17,950 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bcaa348, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,950 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35935,-1 for getting cluster id 2024-11-21T00:28:17,951 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:17,952 DEBUG [HMaster-EventLoopGroup-41-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '727cbd73-2263-4ee7-a05f-23c795f85fd6' 2024-11-21T00:28:17,952 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:17,952 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "727cbd73-2263-4ee7-a05f-23c795f85fd6" 2024-11-21T00:28:17,953 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6528a587, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,953 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35935,-1] 2024-11-21T00:28:17,953 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:17,953 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:17,954 INFO [HMaster-EventLoopGroup-41-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45680, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:17,955 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e9d414b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,955 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:17,956 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:17,956 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@54952ee8 2024-11-21T00:28:17,956 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:17,957 INFO [HMaster-EventLoopGroup-41-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45688, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:17,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, 
id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:38567,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:28:17,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:28:17,960 DEBUG [PEWorker-5 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:38567' 2024-11-21T00:28:17,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:17,975 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@546ad2c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,975 DEBUG [PEWorker-5 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:17,975 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:17,976 DEBUG [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:17,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:17,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:17,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77dd16ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:17,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:17,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:17,977 INFO [HMaster-EventLoopGroup-39-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50282, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:17,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8bd9ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:17,978 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:17,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] 
client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:17,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@52e50e8a 2024-11-21T00:28:17,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:17,981 INFO [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:17,981 INFO [PEWorker-5 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-5. 2024-11-21T00:28:17,982 DEBUG [PEWorker-5 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:28:17,982 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:17,982 DEBUG [PEWorker-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:17,982 INFO [PEWorker-5 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER 
=> 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:17,983 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:28:17,983 DEBUG [PEWorker-5 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:28:17,984 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:17,985 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:17,985 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:18,020 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 379, reset compression=false 2024-11-21T00:28:18,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:28:18,024 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 77cd4c08441e114fb1e8cfa9a38f0728, NAME => 'hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE 
=> '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d 2024-11-21T00:28:18,034 DEBUG [PEWorker-5 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:28:18,037 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 379, reset compression=false 2024-11-21T00:28:18,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:18,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:28:18,110 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:18,111 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 77cd4c08441e114fb1e8cfa9a38f0728, disabling compactions & flushes 2024-11-21T00:28:18,111 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:18,111 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:18,111 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. after waiting 0 ms 2024-11-21T00:28:18,111 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:18,111 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 
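At this point the second cluster repeats the same flow for its own hbase:replication table (region 77cd4c08441e114fb1e8cfa9a38f0728), while the caller's RPC handler keeps polling until the procedure is done. A test that needs to wait for a system table like this could poll the Admin API as in the sketch below; the table name, poll interval and timeout are illustrative assumptions rather than what the test in the log actually does.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTableSketch {
  // Polls until the table exists and all of its regions are assigned, or the deadline passes.
  static void waitForTable(Admin admin, TableName table, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!(admin.tableExists(table) && admin.isTableAvailable(table))) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Timed out waiting for " + table);
      }
      Thread.sleep(100);
    }
  }
  // e.g. waitForTable(admin, TableName.valueOf("hbase", "replication"), 30_000);
}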
2024-11-21T00:28:18,111 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 77cd4c08441e114fb1e8cfa9a38f0728: Waiting for close lock at 1732148898110Disabling compacts and flushes for region at 1732148898110Disabling writes for close at 1732148898111 (+1 ms)Writing region close event to WAL at 1732148898111Closed at 1732148898111 2024-11-21T00:28:18,112 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:18,112 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148898112"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148898112"}]},"ts":"1732148898112"} 2024-11-21T00:28:18,114 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:28:18,116 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:18,117 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148898116"}]},"ts":"1732148898116"} 2024-11-21T00:28:18,132 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:28:18,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=77cd4c08441e114fb1e8cfa9a38f0728, ASSIGN}] 2024-11-21T00:28:18,134 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=77cd4c08441e114fb1e8cfa9a38f0728, ASSIGN 2024-11-21T00:28:18,135 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=77cd4c08441e114fb1e8cfa9a38f0728, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,45849,1732148887589; forceNewPlan=false, retain=false 2024-11-21T00:28:18,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:18,285 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=77cd4c08441e114fb1e8cfa9a38f0728, regionState=OPENING, regionLocation=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:18,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=77cd4c08441e114fb1e8cfa9a38f0728, ASSIGN because future has completed 2024-11-21T00:28:18,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 77cd4c08441e114fb1e8cfa9a38f0728, server=5ed4808ef0e6,45849,1732148887589}] 2024-11-21T00:28:18,372 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 379, reset compression=false 2024-11-21T00:28:18,460 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:18,460 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:18,460 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:28:18,462 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45849%2C1732148887589.rep, suffix=, logDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589, archiveDir=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/oldWALs, maxLogs=10 2024-11-21T00:28:18,478 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.rep.1732148898462, exclude list is [], retry=0 2024-11-21T00:28:18,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7ce8dff-c4a9-4000-97b7-4c81619a63a4,DISK] 2024-11-21T00:28:18,502 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.rep.1732148898462 2024-11-21T00:28:18,504 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46429:46429)] 2024-11-21T00:28:18,504 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 77cd4c08441e114fb1e8cfa9a38f0728, NAME => 'hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:18,505 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System 
coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:18,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:18,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. service=MultiRowMutationService 2024-11-21T00:28:18,505 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:28:18,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:18,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,505 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,510 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,511 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77cd4c08441e114fb1e8cfa9a38f0728 columnFamilyName hfileref 2024-11-21T00:28:18,511 DEBUG [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:18,511 INFO 
[StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] regionserver.HStore(327): Store=77cd4c08441e114fb1e8cfa9a38f0728/hfileref, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:18,512 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,513 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77cd4c08441e114fb1e8cfa9a38f0728 columnFamilyName queue 2024-11-21T00:28:18,513 DEBUG [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:18,513 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] regionserver.HStore(327): Store=77cd4c08441e114fb1e8cfa9a38f0728/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:18,513 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,514 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77cd4c08441e114fb1e8cfa9a38f0728 columnFamilyName sid 2024-11-21T00:28:18,514 DEBUG [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:18,514 INFO [StoreOpener-77cd4c08441e114fb1e8cfa9a38f0728-1 {}] regionserver.HStore(327): Store=77cd4c08441e114fb1e8cfa9a38f0728/sid, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:18,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/replication/77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,515 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/replication/77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,516 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,516 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,517 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-11-21T00:28:18,517 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,519 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/replication/77cd4c08441e114fb1e8cfa9a38f0728/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:18,521 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 77cd4c08441e114fb1e8cfa9a38f0728; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70135665, jitterRate=0.04510284960269928}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:18,521 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:18,521 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 77cd4c08441e114fb1e8cfa9a38f0728: Running coprocessor pre-open hook at 1732148898505Writing region info on filesystem at 1732148898505Initializing all the Stores at 1732148898509 (+4 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148898509Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148898510 (+1 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148898510Cleaning up temporary data from old regions at 1732148898516 (+6 ms)Running coprocessor post-open hooks at 1732148898521 (+5 ms)Region opened successfully at 1732148898521 2024-11-21T00:28:18,522 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728., pid=10, masterSystemTime=1732148898453 2024-11-21T00:28:18,523 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 
2024-11-21T00:28:18,523 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:18,524 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=77cd4c08441e114fb1e8cfa9a38f0728, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:18,536 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77cd4c08441e114fb1e8cfa9a38f0728, server=5ed4808ef0e6,45849,1732148887589 because future has completed 2024-11-21T00:28:18,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:28:18,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 77cd4c08441e114fb1e8cfa9a38f0728, server=5ed4808ef0e6,45849,1732148887589 in 246 msec 2024-11-21T00:28:18,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:28:18,548 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=77cd4c08441e114fb1e8cfa9a38f0728, ASSIGN in 414 msec 2024-11-21T00:28:18,549 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:18,549 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148898549"}]},"ts":"1732148898549"} 2024-11-21T00:28:18,561 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:28:18,563 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:18,566 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 581 msec 2024-11-21T00:28:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:18,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728., hostname=5ed4808ef0e6,45849,1732148887589, seqNum=2] 2024-11-21T00:28:18,633 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 379, reset 
compression=false 2024-11-21T00:28:18,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:18,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:18,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:28:18,781 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 379, reset compression=false 2024-11-21T00:28:18,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45849 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:28:18,817 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:28:18,854 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,45849,1732148887589, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:28:18,855 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:18,855 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45849,1732148887589, seqNum=-1] 2024-11-21T00:28:18,855 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:18,856 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32979, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.19 (auth:SIMPLE), service=ClientService 2024-11-21T00:28:18,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,45849,1732148887589', locateType=CURRENT is [region=hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728., hostname=5ed4808ef0e6,45849,1732148887589, seqNum=2] 2024-11-21T00:28:18,920 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:28:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.HMaster(4169): Remote procedure 
done, pid=11 2024-11-21T00:28:18,938 INFO [PEWorker-1 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,45849,1732148887589 suceeded 2024-11-21T00:28:18,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:28:18,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 274 msec 2024-11-21T00:28:18,941 INFO [PEWorker-1 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:38567,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:28:18,943 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 984 msec 2024-11-21T00:28:18,959 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:38567' 2024-11-21T00:28:18,976 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@7092ad28, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:18,976 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:18,977 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:18,978 DEBUG [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:18,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:18,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:18,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3424b599, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:18,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:18,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:18,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:18,987 INFO 
[HMaster-EventLoopGroup-39-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50322, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.19 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:18,988 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3a7c3817, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:18,988 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:18,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:18,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@d9c1322 2024-11-21T00:28:18,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:18,992 INFO [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50328, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.19 (auth:SIMPLE), service=MasterService 2024-11-21T00:28:18,993 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,45849,1732148887589 (queues=1) is replicating from cluster=727cbd73-2263-4ee7-a05f-23c795f85fd6 to cluster=8d153efa-dd7f-4816-8e94-3c313345b6d9 2024-11-21T00:28:18,994 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C45849%2C1732148887589 2024-11-21T00:28:18,994 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,45849,1732148887589, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:28:19,010 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, startPosition=0, beingWritten=true 2024-11-21T00:28:19,021 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C45849%2C1732148887589 2024-11-21T00:28:19,053 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:19,053 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 379, reset compression=false 2024-11-21T00:28:19,053 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:19,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35935 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:19,099 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:28:19,099 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:28:19,099 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:420) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:19,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:19,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:19,102 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:28:19,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d6a731, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,113 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42535,-1 for getting cluster id 2024-11-21T00:28:19,113 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:19,114 DEBUG [HMaster-EventLoopGroup-37-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f531fde5-6173-43d1-970f-150eb244b957' 2024-11-21T00:28:19,114 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:19,114 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f531fde5-6173-43d1-970f-150eb244b957" 2024-11-21T00:28:19,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9fc3dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42535,-1] 2024-11-21T00:28:19,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:19,115 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:19,116 INFO [HMaster-EventLoopGroup-37-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46388, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:19,117 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30aab772, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58823de1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,127 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:19,128 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:19,128 DEBUG [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:19,129 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:19,129 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] 
client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:19,129 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ac4d06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,129 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:19,129 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:19,129 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:19,130 INFO [HMaster-EventLoopGroup-39-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:19,131 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76416d73, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,136 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5102d7f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,140 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35935,-1 for getting cluster id 2024-11-21T00:28:19,141 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:19,142 DEBUG [HMaster-EventLoopGroup-41-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '727cbd73-2263-4ee7-a05f-23c795f85fd6' 2024-11-21T00:28:19,142 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:19,142 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "727cbd73-2263-4ee7-a05f-23c795f85fd6" 2024-11-21T00:28:19,142 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b30d30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,142 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35935,-1] 2024-11-21T00:28:19,142 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:19,143 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:19,144 INFO [HMaster-EventLoopGroup-41-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45706, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:19,144 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa0dcd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,145 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:19,147 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45749,1732148876424, seqNum=-1] 2024-11-21T00:28:19,147 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:19,148 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-38-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42884, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:19,150 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0., hostname=5ed4808ef0e6,45749,1732148876424, seqNum=2] 2024-11-21T00:28:19,159 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:19,161 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45849,1732148887589, seqNum=-1] 2024-11-21T00:28:19,162 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:19,163 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58664, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:19,166 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05., hostname=5ed4808ef0e6,45849,1732148887589, seqNum=2] 2024-11-21T00:28:19,177 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row. 
IsDeleteReplication:false 2024-11-21T00:28:19,289 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 379, reset compression=false 2024-11-21T00:28:19,296 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 379, reset compression=false 2024-11-21T00:28:19,344 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 379, reset compression=false 2024-11-21T00:28:19,402 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:19,402 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 480, reset compression=false 2024-11-21T00:28:19,402 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45749,1732148876424 got entry batch from reader: WALEntryBatch [walEntries=[{test/e9f5ea62361f185c33b911c4d081b3e0/4=[#edits: 1 = ],199}], lastWalPath=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003, lastWalPosition=480, nbRowKeys=1, nbHFiles=0, heapSize=199, lastSeqIds={}, endOfFile=false,usedBufferSize=199] 2024-11-21T00:28:19,405 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:19,406 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36992, version=3.0.0-beta-2-SNAPSHOT, 
sasl=false, ugi=jenkins.hfs.17 (auth:SIMPLE), service=AdminService 2024-11-21T00:28:19,407 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:28:19,425 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@189b7253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,425 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,38567,-1 for getting cluster id 2024-11-21T00:28:19,425 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:19,426 DEBUG [HMaster-EventLoopGroup-39-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8d153efa-dd7f-4816-8e94-3c313345b6d9' 2024-11-21T00:28:19,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:19,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8d153efa-dd7f-4816-8e94-3c313345b6d9" 2024-11-21T00:28:19,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@5b1b452d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,38567,-1] 2024-11-21T00:28:19,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:19,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:19,428 INFO [HMaster-EventLoopGroup-39-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50358, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.18 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:19,428 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3b7e69ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,429 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:19,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43643,1732148882416, seqNum=-1] 2024-11-21T00:28:19,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-21T00:28:19,432 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36996, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.18 (auth:SIMPLE), service=ClientService 2024-11-21T00:28:19,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148890791.ea4ec3b5aa2624682520b354168ef297., hostname=5ed4808ef0e6,43643,1732148882416, seqNum=2] 2024-11-21T00:28:19,437 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:28:19,659 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 379, reset compression=false 2024-11-21T00:28:19,661 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 480, reset compression=false 2024-11-21T00:28:19,940 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 379, reset compression=false 2024-11-21T00:28:19,974 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:19,974 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 504, reset compression=false 2024-11-21T00:28:19,974 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[{test/ea4ec3b5aa2624682520b354168ef297/4=[#edits: 1 = ],215}], 
lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, lastWalPosition=504, nbRowKeys=1, nbHFiles=0, heapSize=215, lastSeqIds={}, endOfFile=false,usedBufferSize=215] 2024-11-21T00:28:19,980 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 480, reset compression=false 2024-11-21T00:28:19,985 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:19,987 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58672, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.18 (auth:SIMPLE), service=AdminService 2024-11-21T00:28:19,988 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:28:19,999 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1795d26b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:19,999 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,35935,-1 for getting cluster id 2024-11-21T00:28:19,999 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:20,000 DEBUG [HMaster-EventLoopGroup-41-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '727cbd73-2263-4ee7-a05f-23c795f85fd6' 2024-11-21T00:28:20,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:20,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "727cbd73-2263-4ee7-a05f-23c795f85fd6" 2024-11-21T00:28:20,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@e67139c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:20,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,35935,-1] 2024-11-21T00:28:20,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientMetaService, sasl=false 2024-11-21T00:28:20,001 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:20,002 INFO [HMaster-EventLoopGroup-41-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45724, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.19 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:20,003 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@4e05a805, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:20,004 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:20,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45849,1732148887589, seqNum=-1] 2024-11-21T00:28:20,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:20,006 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-42-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58684, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.19 (auth:SIMPLE), service=ClientService 2024-11-21T00:28:20,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-42-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row', locateType=CURRENT is [region=test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05., hostname=5ed4808ef0e6,45849,1732148887589, seqNum=2] 2024-11-21T00:28:20,013 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
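The records above trace the sink-side handler standing up a short-lived client connection: it asks the ConnectionRegistry for the cluster id, resolves hbase:meta via ClientMetaService, and then locates the 'test' region before applying the shipped edits. Below is a minimal sketch of that same client-side flow using the public HBase async client API; the table and row names come from the log, everything else is an illustrative assumption rather than the sink's internal code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class SinkSideLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection() performs the registry round trip seen in the log:
    // cluster id fetch, then the meta region location via ClientMetaService.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // The locator step corresponds to "The fetched location of 'test', row='row'".
      Result r = conn.getTable(TableName.valueOf("test"))
          .get(new Get(Bytes.toBytes("row")))
          .get();
      System.out.println("row present on this cluster: " + !r.isEmpty());
    }
  }
}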
2024-11-21T00:28:20,072 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 379, reset compression=false 2024-11-21T00:28:20,108 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:20,108 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 528, reset compression=false 2024-11-21T00:28:20,108 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=528, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:20,144 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:28:20,179 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row. 
IsDeleteReplication:false 2024-11-21T00:28:20,180 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:20,185 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,43643,1732148882416, seqNum=-1] 2024-11-21T00:28:20,185 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:20,187 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37000, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:20,189 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='row1', locateType=CURRENT is [region=test,,1732148890791.ea4ec3b5aa2624682520b354168ef297., hostname=5ed4808ef0e6,43643,1732148882416, seqNum=2] 2024-11-21T00:28:20,197 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 504, reset compression=false 2024-11-21T00:28:20,199 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row1. IsDeleteReplication:false 2024-11-21T00:28:20,222 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:20,222 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 608, reset compression=false 2024-11-21T00:28:20,222 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[{test/ea4ec3b5aa2624682520b354168ef297/5=[#edits: 1 = ],207}], lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, lastWalPosition=608, nbRowKeys=1, nbHFiles=0, heapSize=207, lastSeqIds={}, endOfFile=false,usedBufferSize=207] 2024-11-21T00:28:20,224 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(298): Started replicating mutations. 
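The "Obtained row" / "Waiting for more time for replication" pair above is the test polling the peer cluster until the replicated edit becomes visible. A sketch of that wait loop with a plain synchronous Table handle follows; the retry budget and sleep interval are illustrative assumptions, not the values TestMasterReplication actually uses.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class WaitForReplicationSketch {
  /** Polls the peer cluster until the row appears (or disappears, for delete tests). */
  static boolean waitForRow(Connection peerConn, String row, boolean expectDeleted)
      throws Exception {
    try (Table table = peerConn.getTable(TableName.valueOf("test"))) {
      for (int attempt = 0; attempt < 10; attempt++) {  // illustrative retry budget
        Result r = table.get(new Get(Bytes.toBytes(row)));
        boolean present = !r.isEmpty();
        if (present != expectDeleted) {                 // row arrived, or delete confirmed
          return true;
        }
        Thread.sleep(1000);                             // "Waiting for more time for replication"
      }
      return false;
    }
  }
}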
2024-11-21T00:28:20,232 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:28:20,329 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 528, reset compression=false 2024-11-21T00:28:20,361 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:20,361 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 656, reset compression=false 2024-11-21T00:28:20,361 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=656, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:20,443 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 480, reset compression=false 2024-11-21T00:28:20,444 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 608, reset compression=false 2024-11-21T00:28:20,575 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 656, reset compression=false 2024-11-21T00:28:20,760 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 608, reset compression=false 2024-11-21T00:28:20,919 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 656, reset compression=false 2024-11-21T00:28:20,967 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 480, reset compression=false 2024-11-21T00:28:21,201 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row1. IsDeleteReplication:false 2024-11-21T00:28:21,204 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row2. 
IsDeleteReplication:false 2024-11-21T00:28:21,207 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 608, reset compression=false 2024-11-21T00:28:21,349 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 656, reset compression=false 2024-11-21T00:28:21,358 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:21,358 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 760, reset compression=false 2024-11-21T00:28:21,358 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[{test/ef3b8661a5d0aea3ac56d3688db20d05/6=[#edits: 1 = ],207}], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=760, nbRowKeys=1, nbHFiles=0, heapSize=207, lastSeqIds={}, endOfFile=false,usedBufferSize=207] 2024-11-21T00:28:21,362 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:21,365 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-40-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37016, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.19 (auth:SIMPLE), service=AdminService 2024-11-21T00:28:21,366 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:28:21,369 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
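The wal-reader and shipper threads named in these records form a bounded producer-consumer pair: the reader tails the WAL, batches eligible entries, and the shipper drains those batches toward the peer. The sketch below shows only that shape with a plain BlockingQueue; the Batch type and the ship callback are hypothetical stand-ins, not the internal ReplicationSourceWALReader/ReplicationSourceShipper API.

import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.function.Consumer;

final class ReaderShipperSketch {
  /** Hypothetical stand-in for one batch of WAL entries plus the position it ends at. */
  static final class Batch {
    final List<byte[]> encodedEntries;
    final long lastWalPosition;
    Batch(List<byte[]> encodedEntries, long lastWalPosition) {
      this.encodedEntries = encodedEntries;
      this.lastWalPosition = lastWalPosition;
    }
  }

  // Bounded handoff between the reader thread (producer) and the shipper thread (consumer).
  private final BlockingQueue<Batch> queue = new ArrayBlockingQueue<>(1);

  /** Reader side: called after "Read N WAL entries eligible for replication". */
  void onBatchRead(Batch batch) throws InterruptedException {
    queue.put(batch);             // blocks when the shipper has fallen behind
  }

  /** Shipper side: the loop behind "Shipper ... got entry batch from reader". */
  void shipperLoop(Consumer<Batch> ship) throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
      Batch batch = queue.take(); // wait for the reader to hand over the next batch
      ship.accept(batch);         // replicate it; lastWalPosition can then be persisted
    }
  }
}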
2024-11-21T00:28:21,569 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 760, reset compression=false 2024-11-21T00:28:21,583 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 480, reset compression=false 2024-11-21T00:28:21,712 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 608, reset compression=false 2024-11-21T00:28:21,749 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:21,749 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 735, reset compression=false 2024-11-21T00:28:21,749 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, lastWalPosition=735, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:21,896 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 760, reset compression=false 2024-11-21T00:28:21,981 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 735, reset compression=false 2024-11-21T00:28:22,211 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row2. IsDeleteReplication:false 2024-11-21T00:28:22,217 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row. IsDeleteReplication:true 2024-11-21T00:28:22,297 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 735, reset compression=false 2024-11-21T00:28:22,302 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 480, reset compression=false 2024-11-21T00:28:22,304 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 760, reset compression=false 2024-11-21T00:28:22,361 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:22,361 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:22,364 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45749,1732148876424 got entry batch from reader: WALEntryBatch [walEntries=[{test/e9f5ea62361f185c33b911c4d081b3e0/5=[#edits: 2 = ],271}], 
lastWalPath=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003, lastWalPosition=636, nbRowKeys=1, nbHFiles=0, heapSize=271, lastSeqIds={}, endOfFile=false,usedBufferSize=271] 2024-11-21T00:28:22,365 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:28:22,368 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 2024-11-21T00:28:22,577 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:22,740 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 735, reset compression=false 2024-11-21T00:28:22,757 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:22,757 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 883, reset compression=false 2024-11-21T00:28:22,757 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[{test/ea4ec3b5aa2624682520b354168ef297/7=[#edits: 2 = ],287}], lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, lastWalPosition=883, nbRowKeys=1, nbHFiles=0, heapSize=287, lastSeqIds={}, endOfFile=false,usedBufferSize=287] 2024-11-21T00:28:22,760 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:28:22,763 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
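On the receiving cluster, "Started replicating mutations" and "Finished replicating mutations" bracket the sink applying the shipped edits as ordinary client writes. A rough sketch of that apply step with a BufferedMutator is shown below, assuming the shipped WAL cells have already been converted back into Put/Delete mutations (that conversion is internal to ReplicationSink and omitted here).

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Mutation;

final class SinkApplySketch {
  /** Applies one shipped batch of mutations to the local (sink) cluster. */
  static void applyBatch(Connection localConn, TableName table, List<Mutation> mutations)
      throws IOException {
    // "Started replicating mutations."
    try (BufferedMutator mutator = localConn.getBufferedMutator(table)) {
      mutator.mutate(mutations);  // buffer the whole batch
      mutator.flush();            // push it out before acknowledging the source
    }
    // "Finished replicating mutations."
  }
}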
2024-11-21T00:28:22,811 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:22,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,877 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 760, reset compression=false 2024-11-21T00:28:22,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,880 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,881 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:22,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,881 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,882 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,903 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:22,903 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:22,903 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 932, reset compression=false 2024-11-21T00:28:22,903 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=932, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:22,966 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 883, reset compression=false 2024-11-21T00:28:23,113 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 932, reset compression=false 2024-11-21T00:28:23,218 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row. IsDeleteReplication:true 2024-11-21T00:28:23,230 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row1. 
IsDeleteReplication:true 2024-11-21T00:28:23,269 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 883, reset compression=false 2024-11-21T00:28:23,278 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:23,278 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1042, reset compression=false 2024-11-21T00:28:23,279 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[{test/ea4ec3b5aa2624682520b354168ef297/8=[#edits: 2 = ],279}], lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, lastWalPosition=1042, nbRowKeys=1, nbHFiles=0, heapSize=279, lastSeqIds={}, endOfFile=false,usedBufferSize=279] 2024-11-21T00:28:23,281 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:28:23,296 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45849 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
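From this point the test exercises delete replication ("IsDeleteReplication:true"): a Delete issued on the source cluster must eventually make the row disappear on the peer. A short sketch of that source-side step plus the verification follows, reusing the same public client API; the table and row names are taken from the log, the polling interval is illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class DeleteReplicationSketch {
  static void deleteAndVerify(Connection sourceConn, Connection peerConn) throws Exception {
    byte[] row = Bytes.toBytes("row");
    // Delete on the source; the edit travels the same WAL -> shipper -> sink path as a Put.
    try (Table src = sourceConn.getTable(TableName.valueOf("test"))) {
      src.delete(new Delete(row));
    }
    // Poll the peer until the delete has been replayed there.
    try (Table peer = peerConn.getTable(TableName.valueOf("test"))) {
      while (!peer.get(new Get(row)).isEmpty()) {
        Thread.sleep(500);  // illustrative back-off; the test logs "Waiting for more time..."
      }
    }
  }
}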
2024-11-21T00:28:23,309 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:23,417 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 932, reset compression=false 2024-11-21T00:28:23,454 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:23,454 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1082, reset compression=false 2024-11-21T00:28:23,454 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=1082, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:23,517 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1042, reset compression=false 2024-11-21T00:28:23,661 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1082, reset compression=false 2024-11-21T00:28:23,845 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:23,856 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1042, reset compression=false 2024-11-21T00:28:23,973 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1082, reset compression=false 2024-11-21T00:28:24,232 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row1. IsDeleteReplication:true 2024-11-21T00:28:24,260 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1042, reset compression=false 2024-11-21T00:28:24,366 INFO [AsyncFSWAL-0-hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d-prefix:5ed4808ef0e6,45849,1732148887589 {}] wal.AbstractFSWAL(1368): Slow sync cost: 132 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44027,DS-f7ce8dff-c4a9-4000-97b7-4c81619a63a4,DISK]] 2024-11-21T00:28:24,368 INFO [Time-limited test {}] replication.TestMasterReplication(757): Waiting for more time for replication. Row:row2. 
IsDeleteReplication:true 2024-11-21T00:28:24,390 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1082, reset compression=false 2024-11-21T00:28:24,397 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:28:24,433 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:28:24,433 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1241, reset compression=false 2024-11-21T00:28:24,433 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[{test/ef3b8661a5d0aea3ac56d3688db20d05/9=[#edits: 2 = ],279}], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=1241, nbRowKeys=1, nbHFiles=0, heapSize=279, lastSeqIds={}, endOfFile=false,usedBufferSize=279] 2024-11-21T00:28:24,436 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(298): Started replicating mutations. 2024-11-21T00:28:24,452 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=43643 {}] regionserver.ReplicationSink(302): Finished replicating mutations. 
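The recurring "Reset reader ... to pos N, reset compression=false" lines are the WAL reader re-opening the live WAL file on HDFS and seeking back to the last shipped offset so it can keep tailing it. The fragment below only illustrates that reopen-and-seek pattern under the assumption of a plain FileSystem read; the real WALEntryStream additionally decodes WAL cells, handles log rolls, and copes with files still under construction.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class TailWalSketch {
  static void reopenAt(Configuration conf, Path wal, long lastWalPosition) throws Exception {
    FileSystem fs = wal.getFileSystem(conf);
    // Re-open the WAL and continue from the already-shipped offset, mirroring
    // "Reset reader <wal> to pos <lastWalPosition>".
    try (FSDataInputStream in = fs.open(wal)) {
      in.seek(lastWalPosition);
      // ... decode further WAL entries from 'in' (done internally by the WAL reader)
    }
  }
}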
2024-11-21T00:28:24,453 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:24,459 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:28:24,674 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1241, reset compression=false 2024-11-21T00:28:24,767 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1042, reset compression=false 2024-11-21T00:28:24,789 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:24,789 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:24,789 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, lastWalPosition=1191, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:25,005 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:25,011 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1241, reset compression=false 2024-11-21T00:28:25,185 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:25,346 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:25,370 INFO [Time-limited test {}] replication.TestMasterReplication(764): Obtained row:row2. IsDeleteReplication:true 2024-11-21T00:28:25,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:28:25,379 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
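The teardown that begins here (the call stack in the next record spells out the path) first closes the cached async connection through the shaded Guava Closeables helper and then shuts the mini cluster down. A sketch of that cleanup order, using the HBaseTestingUtil and Closeables classes named in the stack trace; the parameter names are illustrative.

import java.io.Closeable;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

final class TeardownSketch {
  /** Mirrors the cleanup order in the close() call stack: connection first, then cluster. */
  static void shutDown(Closeable cachedConnection, HBaseTestingUtil util) throws Exception {
    // swallowIOException=true: a failed close should not mask the real test outcome.
    Closeables.close(cachedConnection, true);
    util.shutdownMiniCluster();
  }
}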
2024-11-21T00:28:25,379 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:438) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:25,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:25,379 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:25,379 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:28:25,379 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1605092263, stopped=false 2024-11-21T00:28:25,380 INFO [Time-limited 
test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,35935,1732148887414 2024-11-21T00:28:25,380 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:28:25,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/21019767428/running 2024-11-21T00:28:25,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/21019767428/running 2024-11-21T00:28:25,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:25,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:25,398 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:28:25,398 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:28:25,398 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:438) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:25,398 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:25,398 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,45849,1732148887589' ***** 2024-11-21T00:28:25,398 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:28:25,400 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:28:25,400 INFO [RS:0;5ed4808ef0e6:45849 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:28:25,400 INFO [RS:0;5ed4808ef0e6:45849 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:28:25,400 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(3091): Received CLOSE for ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:25,400 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:28:25,401 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on znode that does not yet exist, /21019767428/running 2024-11-21T00:28:25,401 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Set watcher on znode that does not yet exist, /21019767428/running 2024-11-21T00:28:25,406 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(3091): Received CLOSE for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:25,406 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:25,406 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:28:25,406 INFO [RS:0;5ed4808ef0e6:45849 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:45849. 
2024-11-21T00:28:25,406 DEBUG [RS:0;5ed4808ef0e6:45849 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:25,406 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:25,406 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:28:25,407 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:28:25,407 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:28:25,407 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:28:25,408 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:28:25,408 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1325): Online Regions={ef3b8661a5d0aea3ac56d3688db20d05=test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05., 77cd4c08441e114fb1e8cfa9a38f0728=hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:28:25,408 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 77cd4c08441e114fb1e8cfa9a38f0728, ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:25,409 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:25,409 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:25,409 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:25,409 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:25,410 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:25,410 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:28:25,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ef3b8661a5d0aea3ac56d3688db20d05, disabling compactions & flushes 2024-11-21T00:28:25,411 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:25,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:25,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. after waiting 0 ms 2024-11-21T00:28:25,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 
2024-11-21T00:28:25,411 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ef3b8661a5d0aea3ac56d3688db20d05 3/3 column families, dataSize=276 B heapSize=1.77 KB 2024-11-21T00:28:25,457 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1241, reset compression=false 2024-11-21T00:28:25,468 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/f/478628cea67247e682e0c0c8c23cbef9 is 29, key is row1/f:/1732148903219/DeleteFamily/seqid=0 2024-11-21T00:28:25,468 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/info/b69052ac8f4d436a936eba62d285131e is 147, key is hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728./info:regioninfo/1732148898524/Put/seqid=0 2024-11-21T00:28:25,474 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:28:25,486 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:25,486 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1488, reset compression=false 2024-11-21T00:28:25,486 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45849,1732148887589 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927, lastWalPosition=1488, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:25,488 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,45849,1732148887589: Failed to operate on replication queue ***** 
org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,45849,1732148887589, walGroup=5ed4808ef0e6%2C45849%2C1732148887589, offset=5ed4808ef0e6%2C45849%2C1732148887589.1732148888927:1488, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:45849 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:28:25,490 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:28:25,490 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:28:25,491 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:28:25,491 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:28:25,491 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:28:25,491 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": 
"sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1528823808, "init": 1048576000, "max": 2306867200, "used": 958691664 }, "NonHeapMemoryUsage": { "committed": 208863232, "init": 7667712, "max": -1, "used": 205682744 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:28:25,492 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:35935 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:35935 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-21T00:28:25,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741840_1016 (size=7686) 2024-11-21T00:28:25,517 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/info/b69052ac8f4d436a936eba62d285131e 2024-11-21T00:28:25,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741841_1017 (size=5152) 2024-11-21T00:28:25,542 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/f/478628cea67247e682e0c0c8c23cbef9 2024-11-21T00:28:25,555 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 478628cea67247e682e0c0c8c23cbef9 2024-11-21T00:28:25,589 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/f1/ffa42c970e2a45029bc2816735b9544d is 30, key is row1/f1:/1732148903219/DeleteFamily/seqid=0 2024-11-21T00:28:25,591 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/ns/4250e6c5cd1f42cca7aa7ac0d5b938a8 is 43, key is default/ns:d/1732148889527/Put/seqid=0 2024-11-21T00:28:25,610 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 77cd4c08441e114fb1e8cfa9a38f0728, ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:25,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741842_1018 (size=5158) 2024-11-21T00:28:25,632 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=77 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/f1/ffa42c970e2a45029bc2816735b9544d 2024-11-21T00:28:25,637 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ffa42c970e2a45029bc2816735b9544d 2024-11-21T00:28:25,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741843_1019 (size=5153) 2024-11-21T00:28:25,673 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/norep/97708380fdaf4897a2154a5a94849292 is 33, key is row2/norep:/1732148904233/DeleteFamily/seqid=0 2024-11-21T00:28:25,680 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:28:25,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741844_1020 (size=5108) 2024-11-21T00:28:25,737 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1488, reset compression=false 2024-11-21T00:28:25,773 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:25,810 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 77cd4c08441e114fb1e8cfa9a38f0728, ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:25,820 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:28:25,820 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:28:26,010 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 77cd4c08441e114fb1e8cfa9a38f0728, ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:26,025 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:26,049 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1488, reset compression=false 2024-11-21T00:28:26,052 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), 
to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/ns/4250e6c5cd1f42cca7aa7ac0d5b938a8 2024-11-21T00:28:26,084 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/norep/97708380fdaf4897a2154a5a94849292 2024-11-21T00:28:26,090 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 97708380fdaf4897a2154a5a94849292 2024-11-21T00:28:26,093 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/f/478628cea67247e682e0c0c8c23cbef9 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/f/478628cea67247e682e0c0c8c23cbef9 2024-11-21T00:28:26,115 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/rep_barrier/65f2164064f74831bdb4043e6dd77d74 is 112, key is test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05./rep_barrier:seqnumDuringOpen/1732148892489/Put/seqid=0 2024-11-21T00:28:26,125 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 478628cea67247e682e0c0c8c23cbef9 2024-11-21T00:28:26,125 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/f/478628cea67247e682e0c0c8c23cbef9, entries=3, sequenceid=10, filesize=5.0 K 2024-11-21T00:28:26,126 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/f1/ffa42c970e2a45029bc2816735b9544d as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/f1/ffa42c970e2a45029bc2816735b9544d 2024-11-21T00:28:26,136 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ffa42c970e2a45029bc2816735b9544d 2024-11-21T00:28:26,136 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/f1/ffa42c970e2a45029bc2816735b9544d, entries=3, sequenceid=10, filesize=5.0 K 2024-11-21T00:28:26,140 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/.tmp/norep/97708380fdaf4897a2154a5a94849292 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/norep/97708380fdaf4897a2154a5a94849292 2024-11-21T00:28:26,161 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 97708380fdaf4897a2154a5a94849292 2024-11-21T00:28:26,162 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/norep/97708380fdaf4897a2154a5a94849292, entries=1, sequenceid=10, filesize=5.0 K 2024-11-21T00:28:26,165 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~276 B/276, heapSize ~1.73 KB/1768, currentSize=0 B/0 for ef3b8661a5d0aea3ac56d3688db20d05 in 751ms, sequenceid=10, compaction requested=false 2024-11-21T00:28:26,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741845_1021 (size=5518) 2024-11-21T00:28:26,200 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/rep_barrier/65f2164064f74831bdb4043e6dd77d74 2024-11-21T00:28:26,210 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 77cd4c08441e114fb1e8cfa9a38f0728, ef3b8661a5d0aea3ac56d3688db20d05 2024-11-21T00:28:26,242 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/default/test/ef3b8661a5d0aea3ac56d3688db20d05/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=1 2024-11-21T00:28:26,242 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:28:26,242 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:28:26,242 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 
2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ef3b8661a5d0aea3ac56d3688db20d05: Waiting for close lock at 1732148905411Running coprocessor pre-close hooks at 1732148905411Disabling compacts and flushes for region at 1732148905411Disabling writes for close at 1732148905411Obtaining lock to block concurrent updates at 1732148905411Preparing flush snapshotting stores in ef3b8661a5d0aea3ac56d3688db20d05 at 1732148905411Finished memstore snapshotting test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05., syncing WAL and waiting on mvcc, flushsize=dataSize=276, getHeapSize=1768, getOffHeapSize=0, getCellsCount=10 at 1732148905412 (+1 ms)Flushing stores of test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. at 1732148905412Flushing ef3b8661a5d0aea3ac56d3688db20d05/f: creating writer at 1732148905412Flushing ef3b8661a5d0aea3ac56d3688db20d05/f: appending metadata at 1732148905467 (+55 ms)Flushing ef3b8661a5d0aea3ac56d3688db20d05/f: closing flushed file at 1732148905467Flushing ef3b8661a5d0aea3ac56d3688db20d05/f1: creating writer at 1732148905555 (+88 ms)Flushing ef3b8661a5d0aea3ac56d3688db20d05/f1: appending metadata at 1732148905588 (+33 ms)Flushing ef3b8661a5d0aea3ac56d3688db20d05/f1: closing flushed file at 1732148905588Flushing ef3b8661a5d0aea3ac56d3688db20d05/norep: creating writer at 1732148905638 (+50 ms)Flushing ef3b8661a5d0aea3ac56d3688db20d05/norep: appending metadata at 1732148905672 (+34 ms)Flushing ef3b8661a5d0aea3ac56d3688db20d05/norep: closing flushed file at 1732148905673 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@570fb105: reopening flushed file at 1732148906091 (+418 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4da669e5: reopening flushed file at 1732148906125 (+34 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@bc25e88: reopening flushed file at 1732148906136 (+11 ms)Finished flush of dataSize ~276 B/276, heapSize ~1.73 KB/1768, currentSize=0 B/0 for ef3b8661a5d0aea3ac56d3688db20d05 in 751ms, sequenceid=10, compaction requested=false at 1732148906166 (+30 ms)Writing region close event to WAL at 1732148906222 (+56 ms)Running coprocessor post-close hooks at 1732148906242 (+20 ms)Closed at 1732148906242 2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148891945.ef3b8661a5d0aea3ac56d3688db20d05. 2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 77cd4c08441e114fb1e8cfa9a38f0728, disabling compactions & flushes 2024-11-21T00:28:26,243 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 
after waiting 0 ms 2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 77cd4c08441e114fb1e8cfa9a38f0728: Waiting for close lock at 1732148906243Running coprocessor pre-close hooks at 1732148906243Disabling compacts and flushes for region at 1732148906243Disabling writes for close at 1732148906243Failed flush hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728., putting online again at 1732148906243 2024-11-21T00:28:26,243 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2435): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:26,272 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/table/abeea55ca33b418996e71d8b73a89ef2 is 53, key is hbase:replication/table:state/1732148898549/Put/seqid=0 2024-11-21T00:28:26,300 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:26,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741846_1022 (size=5308) 2024-11-21T00:28:26,327 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/table/abeea55ca33b418996e71d8b73a89ef2 2024-11-21T00:28:26,332 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/info/b69052ac8f4d436a936eba62d285131e as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/info/b69052ac8f4d436a936eba62d285131e 2024-11-21T00:28:26,351 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/info/b69052ac8f4d436a936eba62d285131e, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:28:26,354 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/ns/4250e6c5cd1f42cca7aa7ac0d5b938a8 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/ns/4250e6c5cd1f42cca7aa7ac0d5b938a8 2024-11-21T00:28:26,367 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/ns/4250e6c5cd1f42cca7aa7ac0d5b938a8, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:28:26,377 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/rep_barrier/65f2164064f74831bdb4043e6dd77d74 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/rep_barrier/65f2164064f74831bdb4043e6dd77d74 2024-11-21T00:28:26,393 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/rep_barrier/65f2164064f74831bdb4043e6dd77d74, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:28:26,398 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/.tmp/table/abeea55ca33b418996e71d8b73a89ef2 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/table/abeea55ca33b418996e71d8b73a89ef2 2024-11-21T00:28:26,410 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/table/abeea55ca33b418996e71d8b73a89ef2, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:28:26,410 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:28:26,411 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1325): Online Regions={77cd4c08441e114fb1e8cfa9a38f0728=hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:28:26,411 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(3091): Received CLOSE for 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:26,411 DEBUG [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 77cd4c08441e114fb1e8cfa9a38f0728 2024-11-21T00:28:26,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 77cd4c08441e114fb1e8cfa9a38f0728, disabling compactions & flushes 2024-11-21T00:28:26,411 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 
2024-11-21T00:28:26,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:26,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. after waiting 0 ms 2024-11-21T00:28:26,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:26,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 77cd4c08441e114fb1e8cfa9a38f0728: Waiting for close lock at 1732148906411Running coprocessor pre-close hooks at 1732148906411Disabling compacts and flushes for region at 1732148906411Disabling writes for close at 1732148906411Failed flush hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728., putting online again at 1732148906411 2024-11-21T00:28:26,411 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionServer(2435): Abort already in progress. Ignoring the current request with reason: Unrecoverable exception while closing hbase:replication,,1732148897982.77cd4c08441e114fb1e8cfa9a38f0728. 2024-11-21T00:28:26,411 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1001ms, sequenceid=16, compaction requested=false 2024-11-21T00:28:26,489 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 1488, reset compression=false 2024-11-21T00:28:26,523 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:26,523 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 2132, reset compression=false 2024-11-21T00:28:26,534 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:28:26,535 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:28:26,535 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:28:26,535 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:26,535 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148905409Running coprocessor pre-close hooks at 1732148905409Disabling compacts and flushes for region at 1732148905409Disabling writes for close at 1732148905409Obtaining lock to block concurrent updates at 1732148905410 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732148905410Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148905410Flushing stores of hbase:meta,,1.1588230740 at 1732148905411 (+1 ms)Flushing 1588230740/info: creating writer at 1732148905411Flushing 1588230740/info: appending metadata at 1732148905467 (+56 ms)Flushing 1588230740/info: closing flushed file at 1732148905467Flushing 1588230740/ns: creating writer at 1732148905534 (+67 ms)Flushing 1588230740/ns: appending metadata at 1732148905579 (+45 ms)Flushing 1588230740/ns: closing flushed file at 1732148905579Flushing 1588230740/rep_barrier: creating writer at 1732148906057 (+478 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148906114 (+57 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148906114Flushing 1588230740/table: creating writer at 1732148906218 (+104 ms)Flushing 1588230740/table: appending metadata at 1732148906272 (+54 ms)Flushing 1588230740/table: closing flushed file at 1732148906272Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f421069: reopening flushed file at 1732148906331 (+59 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2688c1f7: reopening flushed file at 1732148906351 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@460acea: reopening flushed file at 1732148906368 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b5ac1b: reopening flushed file at 1732148906393 (+25 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1001ms, sequenceid=16, compaction requested=false at 1732148906411 (+18 ms)Writing region close event to WAL at 1732148906484 (+73 ms)Running coprocessor post-close hooks at 1732148906535 (+51 ms)Closed at 1732148906535 2024-11-21T00:28:26,535 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:26,611 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1346): We were exiting though online regions are not empty, because some regions failed closing 2024-11-21T00:28:26,611 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,45849,1732148887589; all regions closed. 
2024-11-21T00:28:26,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:28:26,639 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.meta.1732148889402.meta not finished, retry = 0 2024-11-21T00:28:26,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741839_1015 (size=2171) 2024-11-21T00:28:26,745 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 to pos 2132, reset compression=false 2024-11-21T00:28:26,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741833_1009 (size=2140) 2024-11-21T00:28:26,757 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:26,757 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:28:26,757 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:28:26,758 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:28:26,758 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.WALEntryStream(456): EOF, closing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/WALs/5ed4808ef0e6,45849,1732148887589/5ed4808ef0e6%2C45849%2C1732148887589.1732148888927 2024-11-21T00:28:26,758 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:28:26,760 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:28:26,760 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,45849,1732148887589 because: Region server is closing 2024-11-21T00:28:26,761 INFO [RS:0;5ed4808ef0e6:45849 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:45849. 
2024-11-21T00:28:26,761 DEBUG [RS:0;5ed4808ef0e6:45849 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:26,761 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:26,761 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:26,764 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:28:26,862 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.shipper5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 terminated 2024-11-21T00:28:26,862 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45849,1732148887589.replicationSource.wal-reader.5ed4808ef0e6%2C45849%2C1732148887589,1-5ed4808ef0e6,45849,1732148887589 {}] regionserver.ReplicationSourceWALReader(198): Interrupted while sleeping between WAL reads or adding WAL batch to ship queue java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1640) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.put(LinkedBlockingQueue.java:343) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:157) ~[classes/:?] 2024-11-21T00:28:26,862 INFO [RS:0;5ed4808ef0e6:45849 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:45849. 2024-11-21T00:28:26,862 DEBUG [RS:0;5ed4808ef0e6:45849 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:26,862 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:26,862 DEBUG [RS:0;5ed4808ef0e6:45849 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:26,862 INFO [RS:0;5ed4808ef0e6:45849 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45849 2024-11-21T00:28:26,864 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:28:26,951 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:26,952 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:27,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428/rs 2024-11-21T00:28:27,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/21019767428/rs/5ed4808ef0e6,45849,1732148887589 2024-11-21T00:28:27,024 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:28:27,040 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,45849,1732148887589] 2024-11-21T00:28:27,052 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /21019767428/draining/5ed4808ef0e6,45849,1732148887589 already deleted, retry=false 2024-11-21T00:28:27,052 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,45849,1732148887589 expired; onlineServers=0 2024-11-21T00:28:27,053 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,35935,1732148887414' ***** 2024-11-21T00:28:27,053 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:28:27,053 INFO [M:0;5ed4808ef0e6:35935 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:28:27,053 INFO [M:0;5ed4808ef0e6:35935 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:28:27,053 DEBUG [M:0;5ed4808ef0e6:35935 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:28:27,053 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:28:27,053 DEBUG [M:0;5ed4808ef0e6:35935 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:28:27,053 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148888540 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148888540,5,FailOnTimeoutGroup] 2024-11-21T00:28:27,053 INFO [M:0;5ed4808ef0e6:35935 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:28:27,053 INFO [M:0;5ed4808ef0e6:35935 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:28:27,054 DEBUG [M:0;5ed4808ef0e6:35935 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:28:27,054 INFO [M:0;5ed4808ef0e6:35935 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:28:27,054 INFO [M:0;5ed4808ef0e6:35935 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:28:27,054 INFO [M:0;5ed4808ef0e6:35935 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:28:27,054 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148888548 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148888548,5,FailOnTimeoutGroup] 2024-11-21T00:28:27,056 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:28:27,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/21019767428/master 2024-11-21T00:28:27,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/21019767428 2024-11-21T00:28:27,071 DEBUG [M:0;5ed4808ef0e6:35935 {}] zookeeper.RecoverableZooKeeper(212): Node /21019767428/master already deleted, retry=false 2024-11-21T00:28:27,071 DEBUG [M:0;5ed4808ef0e6:35935 {}] master.ActiveMasterManager(353): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Failed delete of our master address node; KeeperErrorCode = NoNode for /21019767428/master 2024-11-21T00:28:27,078 INFO [M:0;5ed4808ef0e6:35935 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/.lastflushedseqids 2024-11-21T00:28:27,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:27,141 INFO [RS:0;5ed4808ef0e6:45849 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:28:27,141 INFO [RS:0;5ed4808ef0e6:45849 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,45849,1732148887589; zookeeper connection closed. 
2024-11-21T00:28:27,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45849-0x1015aca43ea0007, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:27,162 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@16b79581 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@16b79581 2024-11-21T00:28:27,164 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:28:27,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741847_1023 (size=245) 2024-11-21T00:28:27,586 INFO [M:0;5ed4808ef0e6:35935 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:28:27,588 INFO [M:0;5ed4808ef0e6:35935 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:28:27,588 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:27,588 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:27,588 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:27,588 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:27,588 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:28:27,589 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.51 KB heapSize=64.93 KB 2024-11-21T00:28:27,643 DEBUG [M:0;5ed4808ef0e6:35935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a6492493caf042ad96c849eda0f2efe9 is 82, key is hbase:meta,,1/info:regioninfo/1732148889471/Put/seqid=0 2024-11-21T00:28:27,670 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:27,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741848_1024 (size=5672) 2024-11-21T00:28:27,688 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a6492493caf042ad96c849eda0f2efe9 2024-11-21T00:28:27,747 DEBUG [M:0;5ed4808ef0e6:35935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec6dd2d6ee82420cb70eb89b274b7678 is 1480, key is \x00\x00\x00\x00\x00\x00\x00\x08/proc:d/1732148898565/Put/seqid=0 2024-11-21T00:28:27,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741849_1025 (size=8517) 2024-11-21T00:28:27,994 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:28,169 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=54.96 KB at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec6dd2d6ee82420cb70eb89b274b7678 2024-11-21T00:28:28,212 DEBUG [M:0;5ed4808ef0e6:35935 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e25c09537f4f4c719b692b6df74482e8 is 69, key is 5ed4808ef0e6,45849,1732148887589/rs:state/1732148888634/Put/seqid=0 2024-11-21T00:28:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741850_1026 (size=5156) 
2024-11-21T00:28:28,227 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e25c09537f4f4c719b692b6df74482e8 2024-11-21T00:28:28,242 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a6492493caf042ad96c849eda0f2efe9 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a6492493caf042ad96c849eda0f2efe9 2024-11-21T00:28:28,247 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a6492493caf042ad96c849eda0f2efe9, entries=8, sequenceid=97, filesize=5.5 K 2024-11-21T00:28:28,252 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ec6dd2d6ee82420cb70eb89b274b7678 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec6dd2d6ee82420cb70eb89b274b7678 2024-11-21T00:28:28,280 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ec6dd2d6ee82420cb70eb89b274b7678, entries=11, sequenceid=97, filesize=8.3 K 2024-11-21T00:28:28,281 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e25c09537f4f4c719b692b6df74482e8 as hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e25c09537f4f4c719b692b6df74482e8 2024-11-21T00:28:28,286 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35655/user/jenkins/test-data/4b2086ed-1c47-2b3e-120a-1a70384e5a5d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e25c09537f4f4c719b692b6df74482e8, entries=1, sequenceid=97, filesize=5.0 K 2024-11-21T00:28:28,287 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.51 KB/56847, heapSize ~64.63 KB/66184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 699ms, sequenceid=97, compaction requested=false 2024-11-21T00:28:28,316 INFO [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:28:28,316 DEBUG [M:0;5ed4808ef0e6:35935 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148907588Disabling compacts and flushes for region at 1732148907588Disabling writes for close at 1732148907588Obtaining lock to block concurrent updates at 1732148907589 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148907589Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=56847, getHeapSize=66424, getOffHeapSize=0, getCellsCount=114 at 1732148907589Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148907600 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148907600Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148907643 (+43 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148907643Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148907698 (+55 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148907746 (+48 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148907746Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148908175 (+429 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148908207 (+32 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148908207Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60a0bc7c: reopening flushed file at 1732148908233 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a08f3e4: reopening flushed file at 1732148908247 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e98288: reopening flushed file at 1732148908281 (+34 ms)Finished flush of dataSize ~55.51 KB/56847, heapSize ~64.63 KB/66184, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 699ms, sequenceid=97, compaction requested=false at 1732148908287 (+6 ms)Writing region close event to WAL at 1732148908316 (+29 ms)Closed at 1732148908316 2024-11-21T00:28:28,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44027 is added to blk_1073741830_1006 (size=63666) 2024-11-21T00:28:28,348 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:28:28,348 INFO [M:0;5ed4808ef0e6:35935 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-21T00:28:28,348 INFO [M:0;5ed4808ef0e6:35935 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35935 2024-11-21T00:28:28,350 INFO [M:0;5ed4808ef0e6:35935 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:28:28,492 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:28,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:28,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35935-0x1015aca43ea0006, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:28,520 INFO [M:0;5ed4808ef0e6:35935 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:28:28,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25fb5ae3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:28,584 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@517cb558{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:28:28,584 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:28:28,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bc71f1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:28:28,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f64b84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.log.dir/,STOPPED} 2024-11-21T00:28:28,586 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:28:28,586 WARN [BP-1977379224-172.17.0.2-1732148884812 heartbeating to localhost/127.0.0.1:35655 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:28:28,586 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:28:28,586 WARN [BP-1977379224-172.17.0.2-1732148884812 heartbeating to localhost/127.0.0.1:35655 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1977379224-172.17.0.2-1732148884812 (Datanode Uuid efdb4c1b-191b-4fd9-afe3-621191e442c8) service to localhost/127.0.0.1:35655 2024-11-21T00:28:28,586 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/cluster_b0320c97-1961-290a-e797-e4612a89f93b/data/data1/current/BP-1977379224-172.17.0.2-1732148884812 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:28:28,587 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/cluster_b0320c97-1961-290a-e797-e4612a89f93b/data/data2/current/BP-1977379224-172.17.0.2-1732148884812 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:28:28,587 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:28:28,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a121b35{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:28,600 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@216f8310{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:28:28,600 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:28:28,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@129ae8a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:28:28,601 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e8625ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.log.dir/,STOPPED} 2024-11-21T00:28:28,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:28:28,625 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:28:28,625 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:28:28,625 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:438) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:28,625 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,635 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:28:28,635 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:28:28,635 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1782047321, stopped=false 2024-11-21T00:28:28,635 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,38567,1732148882162 2024-11-21T00:28:28,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-528321262/running 2024-11-21T00:28:28,711 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:28:28,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:28,712 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:28:28,712 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:438) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:28,712 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,712 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,43643,1732148882416' ***** 2024-11-21T00:28:28,712 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:28:28,712 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:28:28,712 INFO [RS:0;5ed4808ef0e6:43643 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:28:28,712 INFO [RS:0;5ed4808ef0e6:43643 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:28:28,712 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(3091): Received CLOSE for d2567d2d5e06963377ad15ea0bba477d 2024-11-21T00:28:28,713 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:28:28,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-528321262/running 2024-11-21T00:28:28,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:28,716 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(3091): Received CLOSE for ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:28,716 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:28,716 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:28:28,716 INFO [RS:0;5ed4808ef0e6:43643 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:43643. 
2024-11-21T00:28:28,716 DEBUG [RS:0;5ed4808ef0e6:43643 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:28,716 DEBUG [RS:0;5ed4808ef0e6:43643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:28,717 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:28:28,717 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:28:28,717 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:28:28,717 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:28:28,717 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on znode that does not yet exist, /1-528321262/running 2024-11-21T00:28:28,720 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing d2567d2d5e06963377ad15ea0bba477d, disabling compactions & flushes 2024-11-21T00:28:28,720 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:28,720 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:28,720 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. after waiting 0 ms 2024-11-21T00:28:28,720 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 
2024-11-21T00:28:28,721 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing d2567d2d5e06963377ad15ea0bba477d 3/3 column families, dataSize=1.16 KB heapSize=2.50 KB 2024-11-21T00:28:28,721 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Set watcher on znode that does not yet exist, /1-528321262/running 2024-11-21T00:28:28,729 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:28:28,729 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1325): Online Regions={d2567d2d5e06963377ad15ea0bba477d=hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d., 1588230740=hbase:meta,,1.1588230740, ea4ec3b5aa2624682520b354168ef297=test,,1732148890791.ea4ec3b5aa2624682520b354168ef297.} 2024-11-21T00:28:28,729 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, d2567d2d5e06963377ad15ea0bba477d, ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:28,730 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:28,730 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:28,730 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:28,730 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:28,730 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:28,730 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:28:28,769 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/info/02720af0cec143dfba7d30eb822b4766 is 147, key is hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d./info:regioninfo/1732148897334/Put/seqid=0 2024-11-21T00:28:28,771 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d/.tmp/queue/565639f577e24011b3c9190c92578f0b is 154, key is 1-5ed4808ef0e6,43643,1732148882416/queue:5ed4808ef0e6%2C43643%2C1732148882416/1732148904796/Put/seqid=0 2024-11-21T00:28:28,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741840_1016 (size=7686) 2024-11-21T00:28:28,783 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), 
to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/info/02720af0cec143dfba7d30eb822b4766 2024-11-21T00:28:28,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741841_1017 (size=5353) 2024-11-21T00:28:28,806 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:28:28,813 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.16 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d/.tmp/queue/565639f577e24011b3c9190c92578f0b 2024-11-21T00:28:28,827 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d/.tmp/queue/565639f577e24011b3c9190c92578f0b as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d/queue/565639f577e24011b3c9190c92578f0b 2024-11-21T00:28:28,833 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d/queue/565639f577e24011b3c9190c92578f0b, entries=1, sequenceid=12, filesize=5.2 K 2024-11-21T00:28:28,834 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1192, heapSize ~1.98 KB/2032, currentSize=0 B/0 for d2567d2d5e06963377ad15ea0bba477d in 114ms, sequenceid=12, compaction requested=false 2024-11-21T00:28:28,845 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/ns/ee0b7e5c80974f228a4392ccbfb06bc1 is 43, key is default/ns:d/1732148884638/Put/seqid=0 2024-11-21T00:28:28,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741842_1018 (size=5153) 2024-11-21T00:28:28,858 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/ns/ee0b7e5c80974f228a4392ccbfb06bc1 2024-11-21T00:28:28,861 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/replication/d2567d2d5e06963377ad15ea0bba477d/recovered.edits/15.seqid, newMaxSeqId=15, maxSeqId=1 2024-11-21T00:28:28,862 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 
2024-11-21T00:28:28,862 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:28:28,862 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:28,862 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for d2567d2d5e06963377ad15ea0bba477d: Waiting for close lock at 1732148908720Running coprocessor pre-close hooks at 1732148908720Disabling compacts and flushes for region at 1732148908720Disabling writes for close at 1732148908720Obtaining lock to block concurrent updates at 1732148908721 (+1 ms)Preparing flush snapshotting stores in d2567d2d5e06963377ad15ea0bba477d at 1732148908721Finished memstore snapshotting hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d., syncing WAL and waiting on mvcc, flushsize=dataSize=1192, getHeapSize=2512, getOffHeapSize=0, getCellsCount=8 at 1732148908721Flushing stores of hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. at 1732148908732 (+11 ms)Flushing d2567d2d5e06963377ad15ea0bba477d/queue: creating writer at 1732148908732Flushing d2567d2d5e06963377ad15ea0bba477d/queue: appending metadata at 1732148908770 (+38 ms)Flushing d2567d2d5e06963377ad15ea0bba477d/queue: closing flushed file at 1732148908770Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2d4d65fc: reopening flushed file at 1732148908825 (+55 ms)Finished flush of dataSize ~1.16 KB/1192, heapSize ~1.98 KB/2032, currentSize=0 B/0 for d2567d2d5e06963377ad15ea0bba477d in 114ms, sequenceid=12, compaction requested=false at 1732148908834 (+9 ms)Writing region close event to WAL at 1732148908847 (+13 ms)Running coprocessor post-close hooks at 1732148908862 (+15 ms)Closed at 1732148908862 2024-11-21T00:28:28,862 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148896813.d2567d2d5e06963377ad15ea0bba477d. 2024-11-21T00:28:28,862 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ea4ec3b5aa2624682520b354168ef297, disabling compactions & flushes 2024-11-21T00:28:28,862 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:28,862 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:28,863 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. after waiting 0 ms 2024-11-21T00:28:28,863 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 
2024-11-21T00:28:28,863 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ea4ec3b5aa2624682520b354168ef297 3/3 column families, dataSize=276 B heapSize=1.77 KB 2024-11-21T00:28:28,893 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/f/8e211ffbb41b405887554dd4f3bc9c31 is 29, key is row1/f:/1732148903219/DeleteFamily/seqid=0 2024-11-21T00:28:28,898 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:28:28,898 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:28:28,905 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/rep_barrier/786425c591694d2a972691399ca32761 is 112, key is test,,1732148890791.ea4ec3b5aa2624682520b354168ef297./rep_barrier:seqnumDuringOpen/1732148891683/Put/seqid=0 2024-11-21T00:28:28,929 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:28,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741843_1019 (size=5152) 2024-11-21T00:28:28,945 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/f/8e211ffbb41b405887554dd4f3bc9c31 2024-11-21T00:28:28,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741844_1020 (size=5518) 2024-11-21T00:28:28,950 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/rep_barrier/786425c591694d2a972691399ca32761 2024-11-21T00:28:28,967 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8e211ffbb41b405887554dd4f3bc9c31 2024-11-21T00:28:28,995 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/f1/a451231b6a544953a277f7c3eda76516 is 30, key is row1/f1:/1732148903219/DeleteFamily/seqid=0 2024-11-21T00:28:29,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741845_1021 (size=5158) 2024-11-21T00:28:29,034 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): 
Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/table/3e5c44660f75432281ba535450c22315 is 53, key is hbase:replication/table:state/1732148897355/Put/seqid=0 2024-11-21T00:28:29,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741846_1022 (size=5308) 2024-11-21T00:28:29,077 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/table/3e5c44660f75432281ba535450c22315 2024-11-21T00:28:29,089 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/info/02720af0cec143dfba7d30eb822b4766 as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/info/02720af0cec143dfba7d30eb822b4766 2024-11-21T00:28:29,094 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/info/02720af0cec143dfba7d30eb822b4766, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:28:29,095 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/ns/ee0b7e5c80974f228a4392ccbfb06bc1 as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/ns/ee0b7e5c80974f228a4392ccbfb06bc1 2024-11-21T00:28:29,103 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/ns/ee0b7e5c80974f228a4392ccbfb06bc1, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:28:29,104 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/rep_barrier/786425c591694d2a972691399ca32761 as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/rep_barrier/786425c591694d2a972691399ca32761 2024-11-21T00:28:29,110 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/rep_barrier/786425c591694d2a972691399ca32761, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:28:29,111 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/.tmp/table/3e5c44660f75432281ba535450c22315 as 
hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/table/3e5c44660f75432281ba535450c22315 2024-11-21T00:28:29,121 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/table/3e5c44660f75432281ba535450c22315, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:28:29,127 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 397ms, sequenceid=16, compaction requested=false 2024-11-21T00:28:29,127 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:28:29,130 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:29,139 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:29,201 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:28:29,202 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:28:29,202 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:28:29,202 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:29,202 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148908730Running coprocessor pre-close hooks at 1732148908730Disabling compacts and flushes for region at 1732148908730Disabling writes for close at 1732148908730Obtaining lock to block concurrent updates at 1732148908730Preparing flush snapshotting stores in 1588230740 at 1732148908730Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148908730Flushing stores of hbase:meta,,1.1588230740 at 1732148908731 (+1 ms)Flushing 1588230740/info: creating writer at 1732148908731Flushing 1588230740/info: appending metadata at 1732148908761 (+30 ms)Flushing 1588230740/info: closing flushed file at 1732148908761Flushing 1588230740/ns: creating writer at 1732148908790 (+29 ms)Flushing 1588230740/ns: 
appending metadata at 1732148908845 (+55 ms)Flushing 1588230740/ns: closing flushed file at 1732148908845Flushing 1588230740/rep_barrier: creating writer at 1732148908868 (+23 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148908904 (+36 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148908904Flushing 1588230740/table: creating writer at 1732148908970 (+66 ms)Flushing 1588230740/table: appending metadata at 1732148909033 (+63 ms)Flushing 1588230740/table: closing flushed file at 1732148909033Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e08f9c9: reopening flushed file at 1732148909088 (+55 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f734d85: reopening flushed file at 1732148909094 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2dba67f8: reopening flushed file at 1732148909103 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c940a3c: reopening flushed file at 1732148909110 (+7 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 397ms, sequenceid=16, compaction requested=false at 1732148909127 (+17 ms)Writing region close event to WAL at 1732148909172 (+45 ms)Running coprocessor post-close hooks at 1732148909202 (+30 ms)Closed at 1732148909202 2024-11-21T00:28:29,202 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:29,330 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1351): Waiting on ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:29,409 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1191, reset compression=false 2024-11-21T00:28:29,422 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:29,422 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1438, reset compression=false 2024-11-21T00:28:29,422 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,43643,1732148882416 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077, 
lastWalPosition=1438, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:29,423 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,43643,1732148882416: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,43643,1732148882416, walGroup=5ed4808ef0e6%2C43643%2C1732148882416, offset=5ed4808ef0e6%2C43643%2C1732148882416.1732148884077:1438, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:43643 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] 
at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:28:29,426 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:28:29,426 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:28:29,430 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:28:29,430 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:28:29,430 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:28:29,430 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.HRegionServer(2451): Dump of metrics as 
JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1528823808, "init": 1048576000, "max": 2306867200, "used": 1063549264 }, "NonHeapMemoryUsage": { "committed": 209387520, "init": 7667712, "max": -1, "used": 206169752 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:28:29,430 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:38567 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:38567 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-21T00:28:29,434 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=77 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/f1/a451231b6a544953a277f7c3eda76516 2024-11-21T00:28:29,443 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a451231b6a544953a277f7c3eda76516 2024-11-21T00:28:29,472 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/norep/74f209dc0f9d494281cb0a73bc612ccb is 33, key is row1/norep:/1732148903219/DeleteFamily/seqid=0 2024-11-21T00:28:29,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741847_1023 (size=5108) 2024-11-21T00:28:29,532 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1351): Waiting on ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:29,657 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1438, reset compression=false 2024-11-21T00:28:29,732 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:28:29,732 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1325): Online Regions={ea4ec3b5aa2624682520b354168ef297=test,,1732148890791.ea4ec3b5aa2624682520b354168ef297.} 2024-11-21T00:28:29,732 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1351): Waiting on ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:29,888 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29 B at sequenceid=10 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/norep/74f209dc0f9d494281cb0a73bc612ccb 2024-11-21T00:28:29,894 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 74f209dc0f9d494281cb0a73bc612ccb 2024-11-21T00:28:29,895 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/f/8e211ffbb41b405887554dd4f3bc9c31 as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/f/8e211ffbb41b405887554dd4f3bc9c31 2024-11-21T00:28:29,903 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8e211ffbb41b405887554dd4f3bc9c31 2024-11-21T00:28:29,903 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/f/8e211ffbb41b405887554dd4f3bc9c31, entries=3, sequenceid=10, filesize=5.0 K 2024-11-21T00:28:29,906 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/f1/a451231b6a544953a277f7c3eda76516 as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/f1/a451231b6a544953a277f7c3eda76516 2024-11-21T00:28:29,915 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a451231b6a544953a277f7c3eda76516 2024-11-21T00:28:29,915 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/f1/a451231b6a544953a277f7c3eda76516, entries=3, sequenceid=10, filesize=5.0 K 2024-11-21T00:28:29,917 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/.tmp/norep/74f209dc0f9d494281cb0a73bc612ccb as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/norep/74f209dc0f9d494281cb0a73bc612ccb 2024-11-21T00:28:29,929 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 74f209dc0f9d494281cb0a73bc612ccb 2024-11-21T00:28:29,929 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/norep/74f209dc0f9d494281cb0a73bc612ccb, entries=1, sequenceid=10, filesize=5.0 K 2024-11-21T00:28:29,930 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~276 B/276, heapSize ~1.73 KB/1768, currentSize=0 B/0 for ea4ec3b5aa2624682520b354168ef297 in 1067ms, sequenceid=10, compaction requested=false 2024-11-21T00:28:29,930 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:28:29,933 DEBUG [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1351): Waiting on ea4ec3b5aa2624682520b354168ef297 2024-11-21T00:28:29,992 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 1438, reset compression=false 2024-11-21T00:28:29,998 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/data/default/test/ea4ec3b5aa2624682520b354168ef297/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=1 2024-11-21T00:28:29,999 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:28:29,999 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:28:29,999 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:29,999 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ea4ec3b5aa2624682520b354168ef297: Waiting for close lock at 1732148908862Running coprocessor pre-close hooks at 1732148908862Disabling compacts and flushes for region at 1732148908862Disabling writes for close at 1732148908863 (+1 ms)Obtaining lock to block concurrent updates at 1732148908863Preparing flush snapshotting stores in ea4ec3b5aa2624682520b354168ef297 at 1732148908863Finished memstore snapshotting test,,1732148890791.ea4ec3b5aa2624682520b354168ef297., syncing WAL and waiting on mvcc, flushsize=dataSize=276, getHeapSize=1768, getOffHeapSize=0, getCellsCount=10 at 1732148908863Flushing stores of test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 
at 1732148908864 (+1 ms)Flushing ea4ec3b5aa2624682520b354168ef297/f: creating writer at 1732148908864Flushing ea4ec3b5aa2624682520b354168ef297/f: appending metadata at 1732148908893 (+29 ms)Flushing ea4ec3b5aa2624682520b354168ef297/f: closing flushed file at 1732148908893Flushing ea4ec3b5aa2624682520b354168ef297/f1: creating writer at 1732148908967 (+74 ms)Flushing ea4ec3b5aa2624682520b354168ef297/f1: appending metadata at 1732148908994 (+27 ms)Flushing ea4ec3b5aa2624682520b354168ef297/f1: closing flushed file at 1732148908994Flushing ea4ec3b5aa2624682520b354168ef297/norep: creating writer at 1732148909443 (+449 ms)Flushing ea4ec3b5aa2624682520b354168ef297/norep: appending metadata at 1732148909471 (+28 ms)Flushing ea4ec3b5aa2624682520b354168ef297/norep: closing flushed file at 1732148909471Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52aa8be7: reopening flushed file at 1732148909894 (+423 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62f965d8: reopening flushed file at 1732148909903 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4435999e: reopening flushed file at 1732148909915 (+12 ms)Finished flush of dataSize ~276 B/276, heapSize ~1.73 KB/1768, currentSize=0 B/0 for ea4ec3b5aa2624682520b354168ef297 in 1067ms, sequenceid=10, compaction requested=false at 1732148909930 (+15 ms)Writing region close event to WAL at 1732148909972 (+42 ms)Running coprocessor post-close hooks at 1732148909998 (+26 ms)Closed at 1732148909999 (+1 ms) 2024-11-21T00:28:29,999 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148890791.ea4ec3b5aa2624682520b354168ef297. 2024-11-21T00:28:30,027 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:30,027 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 2082, reset compression=false 2024-11-21T00:28:30,133 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,43643,1732148882416; all regions closed. 
2024-11-21T00:28:30,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:28:30,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741839_1015 (size=3114) 2024-11-21T00:28:30,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741833_1009 (size=2090) 2024-11-21T00:28:30,221 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 not finished, retry = 0 2024-11-21T00:28:30,256 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 to pos 2082, reset compression=false 2024-11-21T00:28:30,323 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.WALEntryStream(456): EOF, closing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/WALs/5ed4808ef0e6,43643,1732148882416/5ed4808ef0e6%2C43643%2C1732148882416.1732148884077 2024-11-21T00:28:30,324 DEBUG [RS:0;5ed4808ef0e6:43643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:30,324 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:28:30,324 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:28:30,324 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-21T00:28:30,324 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:28:30,325 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:28:30,325 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,43643,1732148882416 because: Region server is closing 2024-11-21T00:28:30,325 INFO [RS:0;5ed4808ef0e6:43643 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:43643. 
2024-11-21T00:28:30,325 DEBUG [RS:0;5ed4808ef0e6:43643 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:30,325 DEBUG [RS:0;5ed4808ef0e6:43643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:30,325 DEBUG [RS:0;5ed4808ef0e6:43643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:30,326 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:28:30,365 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:30,426 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.shipper5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 terminated 2024-11-21T00:28:30,426 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,43643,1732148882416.replicationSource.wal-reader.5ed4808ef0e6%2C43643%2C1732148882416,1-5ed4808ef0e6,43643,1732148882416 {}] regionserver.ReplicationSourceWALReader(198): Interrupted while sleeping between WAL reads or adding WAL batch to ship queue java.lang.InterruptedException: null at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1640) ~[?:?] at java.util.concurrent.LinkedBlockingQueue.put(LinkedBlockingQueue.java:343) ~[?:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:157) ~[classes/:?] 2024-11-21T00:28:30,426 INFO [RS:0;5ed4808ef0e6:43643 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:43643. 2024-11-21T00:28:30,426 DEBUG [RS:0;5ed4808ef0e6:43643 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:30,426 DEBUG [RS:0;5ed4808ef0e6:43643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:30,426 DEBUG [RS:0;5ed4808ef0e6:43643 {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:30,426 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:28:30,426 INFO [RS:0;5ed4808ef0e6:43643 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43643 2024-11-21T00:28:30,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-528321262/rs/5ed4808ef0e6,43643,1732148882416 2024-11-21T00:28:30,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262/rs 2024-11-21T00:28:30,441 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:28:30,442 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,43643,1732148882416] 2024-11-21T00:28:30,505 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1-528321262/draining/5ed4808ef0e6,43643,1732148882416 already deleted, retry=false 2024-11-21T00:28:30,505 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,43643,1732148882416 expired; onlineServers=0 2024-11-21T00:28:30,505 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,38567,1732148882162' ***** 2024-11-21T00:28:30,505 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:28:30,505 INFO [M:0;5ed4808ef0e6:38567 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:28:30,505 INFO [M:0;5ed4808ef0e6:38567 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:28:30,505 DEBUG [M:0;5ed4808ef0e6:38567 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:28:30,505 DEBUG [M:0;5ed4808ef0e6:38567 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:28:30,505 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:28:30,505 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148883704 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148883704,5,FailOnTimeoutGroup] 2024-11-21T00:28:30,505 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148883675 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148883675,5,FailOnTimeoutGroup] 2024-11-21T00:28:30,505 INFO [M:0;5ed4808ef0e6:38567 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:28:30,505 INFO [M:0;5ed4808ef0e6:38567 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:28:30,505 DEBUG [M:0;5ed4808ef0e6:38567 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:28:30,505 INFO [M:0;5ed4808ef0e6:38567 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:28:30,506 INFO [M:0;5ed4808ef0e6:38567 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:28:30,506 INFO [M:0;5ed4808ef0e6:38567 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:28:30,506 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:28:30,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1-528321262/master 2024-11-21T00:28:30,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1-528321262 2024-11-21T00:28:30,544 DEBUG [M:0;5ed4808ef0e6:38567 {}] zookeeper.RecoverableZooKeeper(212): Node /1-528321262/master already deleted, retry=false 2024-11-21T00:28:30,544 DEBUG [M:0;5ed4808ef0e6:38567 {}] master.ActiveMasterManager(353): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Failed delete of our master address node; KeeperErrorCode = NoNode for /1-528321262/master 2024-11-21T00:28:30,607 INFO [M:0;5ed4808ef0e6:38567 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/.lastflushedseqids 2024-11-21T00:28:30,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:30,624 INFO [RS:0;5ed4808ef0e6:43643 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:28:30,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43643-0x1015aca43ea0004, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:30,624 INFO [RS:0;5ed4808ef0e6:43643 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,43643,1732148882416; zookeeper connection closed. 
2024-11-21T00:28:30,641 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f07a0ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f07a0ee 2024-11-21T00:28:30,641 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:28:30,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741848_1024 (size=245) 2024-11-21T00:28:30,643 INFO [M:0;5ed4808ef0e6:38567 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:28:30,643 INFO [M:0;5ed4808ef0e6:38567 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:28:30,643 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:30,643 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:30,643 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:30,643 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:30,643 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:30,644 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.50 KB heapSize=64.92 KB 2024-11-21T00:28:30,679 DEBUG [M:0;5ed4808ef0e6:38567 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/57df77802122453a93c1ea844a407efb is 82, key is hbase:meta,,1/info:regioninfo/1732148884586/Put/seqid=0 2024-11-21T00:28:30,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741849_1025 (size=5672) 2024-11-21T00:28:31,135 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/57df77802122453a93c1ea844a407efb 2024-11-21T00:28:31,178 DEBUG [M:0;5ed4808ef0e6:38567 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b4fb7201f924e3bb3aa7f3aaa85443e is 1480, key is \x00\x00\x00\x00\x00\x00\x00\x08/proc:d/1732148897360/Put/seqid=0 2024-11-21T00:28:31,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741850_1026 (size=8516) 2024-11-21T00:28:31,630 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=54.95 KB at sequenceid=97 (bloomFilter=true), 
to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b4fb7201f924e3bb3aa7f3aaa85443e 2024-11-21T00:28:31,658 DEBUG [M:0;5ed4808ef0e6:38567 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/970a37bee73246cb9566edd51290be16 is 69, key is 5ed4808ef0e6,43643,1732148882416/rs:state/1732148883768/Put/seqid=0 2024-11-21T00:28:31,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741851_1027 (size=5156) 2024-11-21T00:28:31,682 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:28:31,684 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=97 (bloomFilter=true), to=hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/970a37bee73246cb9566edd51290be16 2024-11-21T00:28:31,684 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:31,709 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/57df77802122453a93c1ea844a407efb as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/57df77802122453a93c1ea844a407efb 2024-11-21T00:28:31,715 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/57df77802122453a93c1ea844a407efb, entries=8, sequenceid=97, filesize=5.5 K 2024-11-21T00:28:31,723 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8b4fb7201f924e3bb3aa7f3aaa85443e as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8b4fb7201f924e3bb3aa7f3aaa85443e 2024-11-21T00:28:31,734 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8b4fb7201f924e3bb3aa7f3aaa85443e, entries=11, sequenceid=97, filesize=8.3 K 2024-11-21T00:28:31,744 DEBUG [M:0;5ed4808ef0e6:38567 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/970a37bee73246cb9566edd51290be16 as hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/970a37bee73246cb9566edd51290be16 2024-11-21T00:28:31,746 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:31,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,762 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,778 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41775/user/jenkins/test-data/548a52f8-35da-6471-01d0-1d1d880a474a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/970a37bee73246cb9566edd51290be16, entries=1, sequenceid=97, filesize=5.0 K 2024-11-21T00:28:31,778 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.50 KB/56835, heapSize ~64.63 KB/66176, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1135ms, sequenceid=97, compaction requested=false 2024-11-21T00:28:31,828 INFO [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:31,828 DEBUG [M:0;5ed4808ef0e6:38567 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148910643Disabling compacts and flushes for region at 1732148910643Disabling writes for close at 1732148910643Obtaining lock to block concurrent updates at 1732148910644 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148910644Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=56835, getHeapSize=66416, getOffHeapSize=0, getCellsCount=114 at 1732148910648 (+4 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148910649 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148910649Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148910678 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148910678Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148911139 (+461 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148911178 (+39 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148911178Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148911639 (+461 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148911657 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148911657Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1937cc7: reopening flushed file at 1732148911698 (+41 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@278045b0: reopening flushed file at 1732148911715 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57bc7833: reopening flushed file at 1732148911734 (+19 ms)Finished flush of dataSize ~55.50 KB/56835, heapSize ~64.63 KB/66176, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1135ms, sequenceid=97, compaction requested=false at 1732148911778 (+44 ms)Writing region close event to WAL at 1732148911828 (+50 ms)Closed at 1732148911828 2024-11-21T00:28:31,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,841 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33609 is added to blk_1073741830_1006 (size=63654) 2024-11-21T00:28:31,854 INFO [M:0;5ed4808ef0e6:38567 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:28:31,855 INFO [M:0;5ed4808ef0e6:38567 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38567 2024-11-21T00:28:31,855 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:28:31,855 INFO [M:0;5ed4808ef0e6:38567 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:28:31,862 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:31,869 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:32,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:32,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38567-0x1015aca43ea0003, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:32,013 INFO [M:0;5ed4808ef0e6:38567 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:28:32,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@530a01df{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:32,066 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2644a270{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:28:32,066 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:28:32,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31e40e83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:28:32,066 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a2f4dd3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.log.dir/,STOPPED} 2024-11-21T00:28:32,068 WARN [BP-721868918-172.17.0.2-1732148878788 heartbeating to localhost/127.0.0.1:41775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:28:32,068 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:28:32,068 WARN [BP-721868918-172.17.0.2-1732148878788 heartbeating to localhost/127.0.0.1:41775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-721868918-172.17.0.2-1732148878788 (Datanode Uuid f6306f22-29c3-4b8a-9643-5bda58eb0e00) service to localhost/127.0.0.1:41775 2024-11-21T00:28:32,068 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:28:32,068 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/cluster_fe5596fc-f95e-3580-43bb-38c5fede92d3/data/data1/current/BP-721868918-172.17.0.2-1732148878788 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:28:32,069 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/cluster_fe5596fc-f95e-3580-43bb-38c5fede92d3/data/data2/current/BP-721868918-172.17.0.2-1732148878788 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:28:32,069 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:28:32,076 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@489d5c51{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:32,083 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5026758a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:28:32,083 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:28:32,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ccff5d7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:28:32,083 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4420e372{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/167a67ea-4b90-0f2c-491d-53e1df50c227/hadoop.log.dir/,STOPPED} 2024-11-21T00:28:32,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:28:32,112 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:28:32,112 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:28:32,112 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:438) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:32,112 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:32,113 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:28:32,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:32,113 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:28:32,113 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=433261704, stopped=false 2024-11-21T00:28:32,114 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,42535,1732148876091 2024-11-21T00:28:32,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01310799061/running 2024-11-21T00:28:32,144 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01310799061/running 2024-11-21T00:28:32,144 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:28:32,144 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:28:32,145 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:28:32,145 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on znode that does not yet exist, /01310799061/running 2024-11-21T00:28:32,145 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Set watcher on znode that does not yet exist, /01310799061/running 2024-11-21T00:28:32,160 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:28:32,160 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testCyclicReplication3(TestMasterReplication.java:438) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:32,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:32,160 INFO [Time-limited test {}] regionserver.HRegionServer(2196): 
***** STOPPING region server '5ed4808ef0e6,45749,1732148876424' ***** 2024-11-21T00:28:32,161 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:28:32,167 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:28:32,167 INFO [RS:0;5ed4808ef0e6:45749 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:28:32,167 INFO [RS:0;5ed4808ef0e6:45749 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:28:32,167 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(3091): Received CLOSE for 75a2a836d409c649a5c103e0b1258bf3 2024-11-21T00:28:32,168 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:28:32,168 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(3091): Received CLOSE for e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:32,168 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,45749,1732148876424 2024-11-21T00:28:32,168 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:28:32,168 INFO [RS:0;5ed4808ef0e6:45749 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:45749. 2024-11-21T00:28:32,168 DEBUG [RS:0;5ed4808ef0e6:45749 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:32,168 DEBUG [RS:0;5ed4808ef0e6:45749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:32,168 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:28:32,169 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 75a2a836d409c649a5c103e0b1258bf3, disabling compactions & flushes 2024-11-21T00:28:32,169 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 
2024-11-21T00:28:32,169 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:32,169 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. after waiting 0 ms 2024-11-21T00:28:32,169 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:32,169 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 75a2a836d409c649a5c103e0b1258bf3 3/3 column families, dataSize=594 B heapSize=1.63 KB 2024-11-21T00:28:32,172 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:28:32,172 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:28:32,172 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:28:32,184 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:28:32,184 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1325): Online Regions={75a2a836d409c649a5c103e0b1258bf3=hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3., 1588230740=hbase:meta,,1.1588230740, e9f5ea62361f185c33b911c4d081b3e0=test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0.} 2024-11-21T00:28:32,184 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 75a2a836d409c649a5c103e0b1258bf3, e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:32,193 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:32,193 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:32,193 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:32,193 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:32,193 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:32,193 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:28:32,217 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3/.tmp/queue/4b718edd56e9453d9f1dcdbac1ce5bd4 is 153, key is 
1-5ed4808ef0e6,45749,1732148876424/queue:5ed4808ef0e6%2C45749%2C1732148876424/1732148902370/Put/seqid=0 2024-11-21T00:28:32,236 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:28:32,239 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/info/f740fd60c22d4dfa984f6f04dea585c4 is 147, key is hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3./info:regioninfo/1732148893203/Put/seqid=0 2024-11-21T00:28:32,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741840_1016 (size=5352) 2024-11-21T00:28:32,267 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=594 B at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3/.tmp/queue/4b718edd56e9453d9f1dcdbac1ce5bd4 2024-11-21T00:28:32,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741841_1017 (size=7686) 2024-11-21T00:28:32,319 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3/.tmp/queue/4b718edd56e9453d9f1dcdbac1ce5bd4 as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3/queue/4b718edd56e9453d9f1dcdbac1ce5bd4 2024-11-21T00:28:32,325 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3/queue/4b718edd56e9453d9f1dcdbac1ce5bd4, entries=1, sequenceid=8, filesize=5.2 K 2024-11-21T00:28:32,329 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~594 B/594, heapSize ~1.11 KB/1136, currentSize=0 B/0 for 75a2a836d409c649a5c103e0b1258bf3 in 159ms, sequenceid=8, compaction requested=false 2024-11-21T00:28:32,372 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/replication/75a2a836d409c649a5c103e0b1258bf3/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-21T00:28:32,380 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:28:32,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:28:32,381 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:32,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 75a2a836d409c649a5c103e0b1258bf3: Waiting for close lock at 1732148912169Running coprocessor pre-close hooks at 1732148912169Disabling compacts and flushes for region at 1732148912169Disabling writes for close at 1732148912169Obtaining lock to block concurrent updates at 1732148912169Preparing flush snapshotting stores in 75a2a836d409c649a5c103e0b1258bf3 at 1732148912169Finished memstore snapshotting hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3., syncing WAL and waiting on mvcc, flushsize=dataSize=594, getHeapSize=1616, getOffHeapSize=0, getCellsCount=4 at 1732148912169Flushing stores of hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. at 1732148912172 (+3 ms)Flushing 75a2a836d409c649a5c103e0b1258bf3/queue: creating writer at 1732148912172Flushing 75a2a836d409c649a5c103e0b1258bf3/queue: appending metadata at 1732148912214 (+42 ms)Flushing 75a2a836d409c649a5c103e0b1258bf3/queue: closing flushed file at 1732148912214Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19ecc076: reopening flushed file at 1732148912318 (+104 ms)Finished flush of dataSize ~594 B/594, heapSize ~1.11 KB/1136, currentSize=0 B/0 for 75a2a836d409c649a5c103e0b1258bf3 in 159ms, sequenceid=8, compaction requested=false at 1732148912329 (+11 ms)Writing region close event to WAL at 1732148912355 (+26 ms)Running coprocessor post-close hooks at 1732148912380 (+25 ms)Closed at 1732148912381 (+1 ms) 2024-11-21T00:28:32,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148892626.75a2a836d409c649a5c103e0b1258bf3. 2024-11-21T00:28:32,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e9f5ea62361f185c33b911c4d081b3e0, disabling compactions & flushes 2024-11-21T00:28:32,381 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:32,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:32,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. after waiting 0 ms 2024-11-21T00:28:32,381 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 
2024-11-21T00:28:32,381 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing e9f5ea62361f185c33b911c4d081b3e0 3/3 column families, dataSize=107 B heapSize=1.15 KB 2024-11-21T00:28:32,384 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:32,405 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/f/4d7704191b654e9b8cd975538a2ce7da is 28, key is row/f:/1732148902213/DeleteFamily/seqid=0 2024-11-21T00:28:32,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741842_1018 (size=5085) 2024-11-21T00:28:32,444 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=54 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/f/4d7704191b654e9b8cd975538a2ce7da 2024-11-21T00:28:32,448 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d7704191b654e9b8cd975538a2ce7da 2024-11-21T00:28:32,490 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/f1/63e2d5210e6a48499c54cd54919af992 is 29, key is row/f1:/1732148902213/DeleteFamily/seqid=0 2024-11-21T00:28:32,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741843_1019 (size=5089) 2024-11-21T00:28:32,585 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:32,709 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/info/f740fd60c22d4dfa984f6f04dea585c4 2024-11-21T00:28:32,754 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/ns/d6e8fcc274094524a1e22d32c7fa22aa is 43, key is default/ns:d/1732148878664/Put/seqid=0 2024-11-21T00:28:32,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741844_1020 (size=5153) 2024-11-21T00:28:32,784 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), 
to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/ns/d6e8fcc274094524a1e22d32c7fa22aa 2024-11-21T00:28:32,785 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:32,814 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/rep_barrier/e34b5b05d7894029883e8af2d001f078 is 112, key is test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0./rep_barrier:seqnumDuringOpen/1732148890249/Put/seqid=0 2024-11-21T00:28:32,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741845_1021 (size=5518) 2024-11-21T00:28:32,925 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:28:32,925 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:28:32,925 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/f1/63e2d5210e6a48499c54cd54919af992 2024-11-21T00:28:32,930 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 63e2d5210e6a48499c54cd54919af992 2024-11-21T00:28:32,950 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/norep/62504846e6114f53886bd2a9b2a43304 is 32, key is row/norep:/1732148902213/DeleteFamily/seqid=0 2024-11-21T00:28:32,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741846_1022 (size=5101) 2024-11-21T00:28:32,971 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/norep/62504846e6114f53886bd2a9b2a43304 2024-11-21T00:28:32,977 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 62504846e6114f53886bd2a9b2a43304 2024-11-21T00:28:32,979 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/f/4d7704191b654e9b8cd975538a2ce7da as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/f/4d7704191b654e9b8cd975538a2ce7da 
2024-11-21T00:28:32,986 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e9f5ea62361f185c33b911c4d081b3e0 2024-11-21T00:28:32,986 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 4d7704191b654e9b8cd975538a2ce7da 2024-11-21T00:28:32,987 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/f/4d7704191b654e9b8cd975538a2ce7da, entries=1, sequenceid=6, filesize=5.0 K 2024-11-21T00:28:32,989 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/f1/63e2d5210e6a48499c54cd54919af992 as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/f1/63e2d5210e6a48499c54cd54919af992 2024-11-21T00:28:33,017 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 63e2d5210e6a48499c54cd54919af992 2024-11-21T00:28:33,017 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/f1/63e2d5210e6a48499c54cd54919af992, entries=1, sequenceid=6, filesize=5.0 K 2024-11-21T00:28:33,020 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/.tmp/norep/62504846e6114f53886bd2a9b2a43304 as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/norep/62504846e6114f53886bd2a9b2a43304 2024-11-21T00:28:33,025 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 62504846e6114f53886bd2a9b2a43304 2024-11-21T00:28:33,025 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/norep/62504846e6114f53886bd2a9b2a43304, entries=1, sequenceid=6, filesize=5.0 K 2024-11-21T00:28:33,026 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~107 B/107, heapSize ~1.10 KB/1128, currentSize=0 B/0 for e9f5ea62361f185c33b911c4d081b3e0 in 645ms, sequenceid=6, compaction requested=false 2024-11-21T00:28:33,067 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/default/test/e9f5ea62361f185c33b911c4d081b3e0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-21T00:28:33,068 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:28:33,068 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:28:33,068 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 2024-11-21T00:28:33,068 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e9f5ea62361f185c33b911c4d081b3e0: Waiting for close lock at 1732148912381Running coprocessor pre-close hooks at 1732148912381Disabling compacts and flushes for region at 1732148912381Disabling writes for close at 1732148912381Obtaining lock to block concurrent updates at 1732148912381Preparing flush snapshotting stores in e9f5ea62361f185c33b911c4d081b3e0 at 1732148912381Finished memstore snapshotting test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0., syncing WAL and waiting on mvcc, flushsize=dataSize=107, getHeapSize=1128, getOffHeapSize=0, getCellsCount=4 at 1732148912382 (+1 ms)Flushing stores of test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. at 1732148912382Flushing e9f5ea62361f185c33b911c4d081b3e0/f: creating writer at 1732148912382Flushing e9f5ea62361f185c33b911c4d081b3e0/f: appending metadata at 1732148912404 (+22 ms)Flushing e9f5ea62361f185c33b911c4d081b3e0/f: closing flushed file at 1732148912404Flushing e9f5ea62361f185c33b911c4d081b3e0/f1: creating writer at 1732148912449 (+45 ms)Flushing e9f5ea62361f185c33b911c4d081b3e0/f1: appending metadata at 1732148912489 (+40 ms)Flushing e9f5ea62361f185c33b911c4d081b3e0/f1: closing flushed file at 1732148912489Flushing e9f5ea62361f185c33b911c4d081b3e0/norep: creating writer at 1732148912930 (+441 ms)Flushing e9f5ea62361f185c33b911c4d081b3e0/norep: appending metadata at 1732148912949 (+19 ms)Flushing e9f5ea62361f185c33b911c4d081b3e0/norep: closing flushed file at 1732148912950 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e33ee2f: reopening flushed file at 1732148912977 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@efb73c5: reopening flushed file at 1732148912987 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2002699a: reopening flushed file at 1732148913018 (+31 ms)Finished flush of dataSize ~107 B/107, heapSize ~1.10 KB/1128, currentSize=0 B/0 for e9f5ea62361f185c33b911c4d081b3e0 in 645ms, sequenceid=6, compaction requested=false at 1732148913026 (+8 ms)Writing region close event to WAL at 1732148913064 (+38 ms)Running coprocessor post-close hooks at 1732148913067 (+3 ms)Closed at 1732148913068 (+1 ms) 2024-11-21T00:28:33,068 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148889637.e9f5ea62361f185c33b911c4d081b3e0. 
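[Editor's note] The entries above show the region server flushing region e9f5ea62361f185c33b911c4d081b3e0 of table 'test' as part of closing it: each column family (f, f1, norep) is written to an HFile under .tmp, committed into the family directory, and the close journal is recorded. The same flush path can also be driven from a client through the public Admin API. The sketch below is a minimal, hedged illustration only; the table name comes from the log, but the configuration and connection setup are assumptions and this is not the test's own code.

    // Hedged sketch: force a flush of table "test" via the public Admin API.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flushes every region of the table; on the server side this produces
          // DefaultStoreFlusher / HRegionFileSystem "Committing ... .tmp/..." lines
          // like the ones seen above.
          admin.flush(TableName.valueOf("test"));
        }
      }
    }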
2024-11-21T00:28:33,111 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 636, reset compression=false 2024-11-21T00:28:33,143 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:28:33,143 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 1527, reset compression=false 2024-11-21T00:28:33,143 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,45749,1732148876424 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003, lastWalPosition=1527, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:28:33,144 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,45749,1732148876424: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=1-5ed4808ef0e6,45749,1732148876424, walGroup=5ed4808ef0e6%2C45749%2C1732148876424, offset=5ed4808ef0e6%2C45749%2C1732148876424.1732148878003:1527, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:45749 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:28:33,145 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:28:33,145 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:28:33,146 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:28:33,146 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:28:33,146 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:28:33,146 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": 
"sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1528823808, "init": 1048576000, "max": 2306867200, "used": 1155823952 }, "NonHeapMemoryUsage": { "committed": 209846272, "init": 7667712, "max": -1, "used": 206700184 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:28:33,146 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:42535 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:42535 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-21T00:28:33,187 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:28:33,187 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:28:33,187 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:28:33,258 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/rep_barrier/e34b5b05d7894029883e8af2d001f078 2024-11-21T00:28:33,362 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/table/bf83a2b9753f4ff287d682c33b1af228 is 53, key is hbase:replication/table:state/1732148893225/Put/seqid=0 2024-11-21T00:28:33,374 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.wal-reader.5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/WALs/5ed4808ef0e6,45749,1732148876424/5ed4808ef0e6%2C45749%2C1732148876424.1732148878003 to pos 1527, reset compression=false 2024-11-21T00:28:33,388 DEBUG [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:28:33,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741847_1023 (size=5308) 2024-11-21T00:28:33,411 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/table/bf83a2b9753f4ff287d682c33b1af228 2024-11-21T00:28:33,434 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/info/f740fd60c22d4dfa984f6f04dea585c4 as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/info/f740fd60c22d4dfa984f6f04dea585c4 2024-11-21T00:28:33,445 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/info/f740fd60c22d4dfa984f6f04dea585c4, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:28:33,446 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/ns/d6e8fcc274094524a1e22d32c7fa22aa as 
hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/ns/d6e8fcc274094524a1e22d32c7fa22aa 2024-11-21T00:28:33,463 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/ns/d6e8fcc274094524a1e22d32c7fa22aa, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:28:33,465 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/rep_barrier/e34b5b05d7894029883e8af2d001f078 as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/rep_barrier/e34b5b05d7894029883e8af2d001f078 2024-11-21T00:28:33,473 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/rep_barrier/e34b5b05d7894029883e8af2d001f078, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:28:33,474 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/.tmp/table/bf83a2b9753f4ff287d682c33b1af228 as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/table/bf83a2b9753f4ff287d682c33b1af228 2024-11-21T00:28:33,483 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/table/bf83a2b9753f4ff287d682c33b1af228, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:28:33,489 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1296ms, sequenceid=16, compaction requested=false 2024-11-21T00:28:33,576 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:28:33,576 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:28:33,576 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:28:33,576 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:33,577 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148912193Running coprocessor pre-close hooks at 
1732148912193Disabling compacts and flushes for region at 1732148912193Disabling writes for close at 1732148912193Obtaining lock to block concurrent updates at 1732148912193Preparing flush snapshotting stores in 1588230740 at 1732148912193Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148912194 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148912194Flushing 1588230740/info: creating writer at 1732148912194Flushing 1588230740/info: appending metadata at 1732148912239 (+45 ms)Flushing 1588230740/info: closing flushed file at 1732148912239Flushing 1588230740/ns: creating writer at 1732148912726 (+487 ms)Flushing 1588230740/ns: appending metadata at 1732148912754 (+28 ms)Flushing 1588230740/ns: closing flushed file at 1732148912754Flushing 1588230740/rep_barrier: creating writer at 1732148912791 (+37 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148912813 (+22 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148912813Flushing 1588230740/table: creating writer at 1732148913300 (+487 ms)Flushing 1588230740/table: appending metadata at 1732148913362 (+62 ms)Flushing 1588230740/table: closing flushed file at 1732148913362Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b182b9c: reopening flushed file at 1732148913433 (+71 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31b1e897: reopening flushed file at 1732148913445 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e2e3fcc: reopening flushed file at 1732148913464 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b9d9e93: reopening flushed file at 1732148913473 (+9 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1296ms, sequenceid=16, compaction requested=false at 1732148913489 (+16 ms)Writing region close event to WAL at 1732148913572 (+83 ms)Running coprocessor post-close hooks at 1732148913576 (+4 ms)Closed at 1732148913576 2024-11-21T00:28:33,577 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:33,588 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,45749,1732148876424; all regions closed. 
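[Editor's note] The "***** ABORTING region server ... Failed to operate on replication queue *****" entries above show why this shutdown turned into an abort: ReplicationSourceShipper tried to persist its WAL offset through TableReplicationQueueStorage.setOffset, the underlying RPC client had already been stopped (StoppedRpcClientException), and interruptOrAbortWhenFail escalated the failure to an abort rather than risk a stale replication offset. The snippet below is a simplified, generic illustration of that "abort on persistence failure" pattern; all types in it are hypothetical stand-ins, not HBase's actual implementation.

    // Simplified illustration of the pattern visible in the stack trace above.
    interface OffsetStore {
      void setOffset(String queueId, String wal, long position) throws Exception;
    }
    interface Server {
      void abort(String reason, Throwable cause);
    }
    class OffsetPersister {
      private final OffsetStore store;
      private final Server server;
      OffsetPersister(OffsetStore store, Server server) {
        this.store = store;
        this.server = server;
      }
      void persist(String queueId, String wal, long position) {
        try {
          store.setOffset(queueId, wal, position);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();   // deliberate shutdown: just stop
        } catch (Exception e) {
          // Losing the shipped position could replicate edits twice or skip them,
          // so the safer choice is to abort the server, as the log shows.
          server.abort("failed to persist replication offset", e);
        }
      }
    }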
2024-11-21T00:28:33,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741834_1010 (size=4239) 2024-11-21T00:28:33,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741839_1015 (size=2240) 2024-11-21T00:28:33,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741833_1009 (size=1535) 2024-11-21T00:28:33,668 DEBUG [RS:0;5ed4808ef0e6:45749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:33,668 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:28:33,669 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:28:33,669 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:28:33,669 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:28:33,670 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:28:33,670 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,45749,1732148876424 because: Region server is closing 2024-11-21T00:28:33,670 INFO [RS:0;5ed4808ef0e6:45749 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:45749. 
2024-11-21T00:28:33,670 DEBUG [RS:0;5ed4808ef0e6:45749 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:33,670 DEBUG [RS:0;5ed4808ef0e6:45749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:33,670 DEBUG [RS:0;5ed4808ef0e6:45749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:33,672 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
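[Editor's note] The DEBUG call stack above comes from AsyncConnectionImpl.close(), invoked by HBaseReplicationEndpoint.disconnect() while the replication source terminates; that is what emits the "Connection has been closed by RS:0;..." line and lets the shipper and wal-reader threads wind down. For orientation, the public client lifecycle around that close path looks roughly like the hedged sketch below; the configuration details are assumptions, not the test's code.

    // Hedged sketch: AsyncConnection lifecycle whose close() produces the
    // "Connection has been closed by ..." line and the DEBUG call stack above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get();
        try {
          // ... use conn.getTable(...) for reads/writes ...
        } finally {
          conn.close();   // logs its call stack at DEBUG, as seen above
        }
      }
    }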
2024-11-21T00:28:33,771 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,45749,1732148876424.replicationSource.shipper5ed4808ef0e6%2C45749%2C1732148876424,1-5ed4808ef0e6,45749,1732148876424 terminated 2024-11-21T00:28:33,771 INFO [RS:0;5ed4808ef0e6:45749 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45749 2024-11-21T00:28:33,821 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01310799061/rs/5ed4808ef0e6,45749,1732148876424 2024-11-21T00:28:33,821 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:28:33,821 ERROR [pool-1939-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$366/0x00007f205c8e5cb0@21d83033 rejected from java.util.concurrent.ThreadPoolExecutor@64daba6c[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-21T00:28:33,822 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061/rs 2024-11-21T00:28:33,825 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:28:33,833 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,45749,1732148876424] 2024-11-21T00:28:33,839 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /01310799061/draining/5ed4808ef0e6,45749,1732148876424 already deleted, retry=false 2024-11-21T00:28:33,839 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,45749,1732148876424 expired; onlineServers=0 2024-11-21T00:28:33,839 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,42535,1732148876091' ***** 2024-11-21T00:28:33,839 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 
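[Editor's note] The "Error while calling watcher" ERROR above is a benign shutdown race: the ZooKeeper event thread delivered the NodeDeleted event to ZKWatcher after the watcher's executor had already been shut down, so the task submission was rejected. The snippet below is a minimal, generic Java reproduction of why a shut-down executor throws RejectedExecutionException; it is not HBase code.

    // Minimal generic reproduction of the rejection seen above:
    // submitting work to an ExecutorService after shutdown() is refused.
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    public class RejectedAfterShutdown {
      public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.shutdown();                       // watcher executor stopped during shutdown
        try {
          pool.execute(() -> System.out.println("late ZK event"));
        } catch (RejectedExecutionException e) {
          // Same failure mode as the watcher error above; during shutdown it is
          // harmless noise rather than data loss.
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }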
2024-11-21T00:28:33,839 INFO [M:0;5ed4808ef0e6:42535 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:28:33,840 INFO [M:0;5ed4808ef0e6:42535 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:28:33,840 DEBUG [M:0;5ed4808ef0e6:42535 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:28:33,840 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-21T00:28:33,840 DEBUG [M:0;5ed4808ef0e6:42535 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:28:33,840 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148877788 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148877788,5,FailOnTimeoutGroup] 2024-11-21T00:28:33,840 INFO [M:0;5ed4808ef0e6:42535 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:28:33,840 INFO [M:0;5ed4808ef0e6:42535 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:28:33,840 DEBUG [M:0;5ed4808ef0e6:42535 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:28:33,840 INFO [M:0;5ed4808ef0e6:42535 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:28:33,840 INFO [M:0;5ed4808ef0e6:42535 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:28:33,840 INFO [M:0;5ed4808ef0e6:42535 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:28:33,840 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148877788 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148877788,5,FailOnTimeoutGroup] 2024-11-21T00:28:33,840 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-21T00:28:33,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01310799061/master 2024-11-21T00:28:33,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01310799061 2024-11-21T00:28:33,850 DEBUG [M:0;5ed4808ef0e6:42535 {}] zookeeper.ZKUtil(347): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Unable to get data of znode /01310799061/master because node does not exist (not an error) 2024-11-21T00:28:33,850 WARN [M:0;5ed4808ef0e6:42535 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-21T00:28:33,850 INFO [M:0;5ed4808ef0e6:42535 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/.lastflushedseqids 2024-11-21T00:28:33,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741848_1024 (size=245) 2024-11-21T00:28:33,892 INFO [M:0;5ed4808ef0e6:42535 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:28:33,892 INFO [M:0;5ed4808ef0e6:42535 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:28:33,892 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:33,892 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:33,892 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:33,892 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:33,892 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:28:33,892 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=55.77 KB heapSize=65.75 KB 2024-11-21T00:28:33,921 DEBUG [M:0;5ed4808ef0e6:42535 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bda047fff2f9460db3eb3abc57b1ccfe is 82, key is hbase:meta,,1/info:regioninfo/1732148878573/Put/seqid=0 2024-11-21T00:28:33,929 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:33,929 DEBUG [pool-1939-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45749-0x1015aca43ea0001, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:33,929 INFO [RS:0;5ed4808ef0e6:45749 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:28:33,929 INFO [RS:0;5ed4808ef0e6:45749 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,45749,1732148876424; zookeeper connection closed. 2024-11-21T00:28:33,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741849_1025 (size=5672) 2024-11-21T00:28:33,940 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bda047fff2f9460db3eb3abc57b1ccfe 2024-11-21T00:28:33,940 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@190c728c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@190c728c 2024-11-21T00:28:33,941 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:28:34,005 DEBUG [M:0;5ed4808ef0e6:42535 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/79c403a8edce470781837622783f64a5 is 1247, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732148890293/Put/seqid=0 2024-11-21T00:28:34,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741850_1026 (size=7219) 2024-11-21T00:28:34,456 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.21 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/79c403a8edce470781837622783f64a5 2024-11-21T00:28:34,469 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 79c403a8edce470781837622783f64a5 2024-11-21T00:28:34,497 DEBUG [M:0;5ed4808ef0e6:42535 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/206071390ce3400599502e2f55982c9b is 69, key is 5ed4808ef0e6,45749,1732148876424/rs:state/1732148877842/Put/seqid=0 2024-11-21T00:28:34,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741851_1027 (size=5156) 2024-11-21T00:28:34,573 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/206071390ce3400599502e2f55982c9b 2024-11-21T00:28:34,591 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bda047fff2f9460db3eb3abc57b1ccfe as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bda047fff2f9460db3eb3abc57b1ccfe 2024-11-21T00:28:34,614 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bda047fff2f9460db3eb3abc57b1ccfe, entries=8, sequenceid=105, filesize=5.5 K 2024-11-21T00:28:34,619 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/79c403a8edce470781837622783f64a5 as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/79c403a8edce470781837622783f64a5 2024-11-21T00:28:34,627 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 79c403a8edce470781837622783f64a5 2024-11-21T00:28:34,627 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/79c403a8edce470781837622783f64a5, entries=11, sequenceid=105, filesize=7.0 K 2024-11-21T00:28:34,629 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/206071390ce3400599502e2f55982c9b as hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/206071390ce3400599502e2f55982c9b 2024-11-21T00:28:34,635 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40339/user/jenkins/test-data/06edb485-6836-de3b-45f8-ae5ad89e61b5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/206071390ce3400599502e2f55982c9b, entries=1, sequenceid=105, filesize=5.0 K 2024-11-21T00:28:34,636 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(3140): Finished flush of dataSize ~55.77 KB/57104, 
heapSize ~65.45 KB/67024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 744ms, sequenceid=105, compaction requested=false 2024-11-21T00:28:34,658 INFO [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:34,658 DEBUG [M:0;5ed4808ef0e6:42535 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148913892Disabling compacts and flushes for region at 1732148913892Disabling writes for close at 1732148913892Obtaining lock to block concurrent updates at 1732148913892Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148913892Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=57104, getHeapSize=67264, getOffHeapSize=0, getCellsCount=122 at 1732148913893 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148913896 (+3 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148913897 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148913921 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148913921Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148913954 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148914004 (+50 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148914004Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148914469 (+465 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148914493 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148914493Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57abb8ca: reopening flushed file at 1732148914583 (+90 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62b2dc05: reopening flushed file at 1732148914614 (+31 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c41750c: reopening flushed file at 1732148914628 (+14 ms)Finished flush of dataSize ~55.77 KB/57104, heapSize ~65.45 KB/67024, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 744ms, sequenceid=105, compaction requested=false at 1732148914636 (+8 ms)Writing region close event to WAL at 1732148914658 (+22 ms)Closed at 1732148914658 2024-11-21T00:28:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42735 is added to blk_1073741830_1006 (size=64435) 2024-11-21T00:28:34,698 INFO [M:0;5ed4808ef0e6:42535 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:28:34,698 INFO [M:0;5ed4808ef0e6:42535 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42535 2024-11-21T00:28:34,699 INFO [M:0;5ed4808ef0e6:42535 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:28:34,699 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
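[Editor's note] The entries that follow show the remaining teardown: Jetty web contexts for the HDFS datanode and namenode stop, the datanode block pool service ends, the ZooKeeper mini-cluster shuts down, and HBaseTestingUtil reports "Minicluster is down" before the ResourceChecker dumps potentially hanging threads. For orientation, the usual mini-cluster lifecycle framing such a test is sketched below; HBaseTestingUtil appears in the log itself, but the test body and method usage here are assumptions, not the actual TestMasterReplication code.

    // Hedged sketch of the mini-cluster lifecycle that frames this log.
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterLifecycle {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();      // starts ZK, HDFS and the HBase master/regionserver
        try {
          // ... run replication assertions against the running mini-cluster ...
        } finally {
          util.shutdownMiniCluster(); // produces the "Minicluster is down" line below
        }
      }
    }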
2024-11-21T00:28:34,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:34,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42535-0x1015aca43ea0000, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:28:34,929 INFO [M:0;5ed4808ef0e6:42535 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:28:34,970 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2f7bb3d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:34,971 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@24595238{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:28:34,971 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:28:34,971 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13cefcc8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:28:34,971 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a453c14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.log.dir/,STOPPED} 2024-11-21T00:28:34,974 WARN [BP-177379055-172.17.0.2-1732148873310 heartbeating to localhost/127.0.0.1:40339 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:28:34,974 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
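[Editor's note] The "Received ZooKeeper Event, type=None, state=Closed" lines above come from a connection-state watcher. The sketch below shows, in a hedged and simplified form, how such a Watcher is typically written; it is not HBase's ZKWatcher, and it assumes only that the standard ZooKeeper client library is on the classpath. A synthetic event is fed in so the example runs without a live quorum.

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

// Illustrative only: a watcher that distinguishes session-state changes
// (type=None, as in the log) from znode change notifications.
public class LoggingWatcher implements Watcher {
    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Watcher.Event.EventType.None) {
            // Session/connection state change rather than a znode change.
            System.out.println("Connection state changed to " + event.getState());
        } else {
            System.out.println("Znode event " + event.getType() + " on " + event.getPath());
        }
    }

    public static void main(String[] args) {
        // Mirror the logged event: type=None, state=Closed, path=null.
        new LoggingWatcher().process(new WatchedEvent(
            Watcher.Event.EventType.None, Watcher.Event.KeeperState.Closed, null));
    }
}
```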
2024-11-21T00:28:34,974 WARN [BP-177379055-172.17.0.2-1732148873310 heartbeating to localhost/127.0.0.1:40339 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-177379055-172.17.0.2-1732148873310 (Datanode Uuid 6a723d48-d1fb-4285-a935-9132bf25010a) service to localhost/127.0.0.1:40339 2024-11-21T00:28:34,974 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:28:34,975 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/data/data1/current/BP-177379055-172.17.0.2-1732148873310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:28:34,975 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/cluster_c02b9828-f3e8-e9f4-c7e5-d1285f0e6e5f/data/data2/current/BP-177379055-172.17.0.2-1732148873310 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:28:34,975 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:28:34,984 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@28a54498{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:34,989 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@f9fe0bd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:28:34,989 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:28:34,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1afd8979{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:28:34,989 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bbc206{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/ee176b3a-d617-caad-562d-b7bb8cb9ef11/hadoop.log.dir/,STOPPED} 2024-11-21T00:28:35,000 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:28:35,016 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:28:35,053 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testCyclicReplication3 Thread=599 (was 550) Potentially hanging thread: HMaster-EventLoopGroup-37-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:40339 from jenkins.hfs.17 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-39-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41775 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-39-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-38-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41775 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35655 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-37-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-42-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35655 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.18@localhost:41775 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: globalEventExecutor-1-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//io.netty.util.concurrent.GlobalEventExecutor.takeTask(GlobalEventExecutor.java:113) app//io.netty.util.concurrent.GlobalEventExecutor$TaskRunner.run(GlobalEventExecutor.java:259) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-38-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.17@localhost:40339 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:60103) java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35655 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-39-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.19@localhost:35655 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-41-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41775 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-41-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-41-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:60103) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35655 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40339 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41775 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-40-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:40339 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-40-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40339 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40339 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-40-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41775 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:41775 from jenkins.hfs.18 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40339 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35655 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:60103) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:40339 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41775 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:35655 from jenkins.hfs.19 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35655 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-38-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-37-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41775 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-42-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-42-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=938 (was 880) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=993 (was 991) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2047 (was 505) - AvailableMemoryMB LEAK? 
- 2024-11-21T00:28:35,053 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=599 is superior to 500 2024-11-21T00:28:35,087 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testLoopedReplication Thread=599, OpenFileDescriptor=938, MaxFileDescriptor=1048576, SystemLoadAverage=993, ProcessCount=11, AvailableMemoryMB=2045 2024-11-21T00:28:35,088 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=599 is superior to 500 2024-11-21T00:28:35,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015aca43ea0002, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:28:35,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015aca43ea0005, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:28:35,095 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015aca43ea0002, quorum=127.0.0.1:60103, baseZNode=/01310799061 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:28:35,095 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015aca43ea0005, quorum=127.0.0.1:60103, baseZNode=/1-528321262 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:28:35,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster2-0x1015aca43ea0008, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received ZooKeeper Event, type=None, state=Disconnected, path=null 2024-11-21T00:28:35,096 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster2-0x1015aca43ea0008, quorum=127.0.0.1:60103, baseZNode=/21019767428 Received Disconnected from ZooKeeper, ignoring 2024-11-21T00:28:35,130 INFO [Time-limited test {}] replication.TestMasterReplication(178): testLoopedReplication 2024-11-21T00:28:35,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.log.dir so I do NOT create it in target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1 2024-11-21T00:28:35,131 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:28:35,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.tmp.dir so I do NOT create it in target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1 2024-11-21T00:28:35,131 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/492ee4e5-0b24-7745-51f4-302e2e4fdbf3/hadoop.tmp.dir Erasing configuration value by system value. 
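[Editor's note] The ResourceChecker output above follows a simple before/after scheme: snapshot resource counts before a test, compare them afterwards, flag growth as a possible leak ("Thread=599 (was 550) ... Thread LEAK?"), and warn when a count exceeds a fixed threshold ("Thread=599 is superior to 500"). The stand-alone sketch below reproduces that scheme for the thread count only; the class, method names, and threshold are arbitrary and it is not HBase's ResourceChecker implementation.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

// Illustrative only: before/after thread-count check in the style of the log above.
public class ThreadCountCheck {
    private static final int WARN_THRESHOLD = 500;          // arbitrary limit for the warning
    private final ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    private int before;

    public void beforeTest() {
        before = threads.getThreadCount();                  // snapshot taken before the test body
    }

    public void afterTest(String testName) {
        int after = threads.getThreadCount();
        System.out.printf("after: %s Thread=%d (was %d)%s%n",
            testName, after, before, after > before ? " - Thread LEAK?" : "");
        if (after > WARN_THRESHOLD) {
            System.out.printf("Thread=%d is superior to %d%n", after, WARN_THRESHOLD);
        }
    }

    public static void main(String[] args) {
        ThreadCountCheck check = new ThreadCountCheck();
        check.beforeTest();
        // ... run the test body here ...
        check.afterTest("example");
    }
}
```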
2024-11-21T00:28:35,131 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1 2024-11-21T00:28:35,131 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c, deleteOnExit=true 2024-11-21T00:28:35,205 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/zookeeper_0, clientPort=62591, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:28:35,233 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62591 2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/test.cache.data in system properties and HBase conf 2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/mapreduce.cluster.temp.dir in system properties and HBase conf 
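[Editor's note] The repeated "Setting <name> to <test-data dir>/<name> in system properties and HBase conf" lines above show each directory-valued setting being redirected under a unique per-test data directory before the minicluster starts, so parallel test runs do not collide. The sketch below illustrates that redirection idea only; the helper method and the in-memory conf map are hypothetical and do not correspond to the HBaseTestingUtil API.

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

// Illustrative only: point directory-valued properties at sub-directories of a
// fresh per-test data directory, in both the JVM system properties and a conf map.
public class TestDirProperties {
    static void redirect(Map<String, String> conf, Path testData, String name) {
        String value = testData.resolve(name).toString();
        System.setProperty(name, value);   // "in system properties"
        conf.put(name, value);             // "... and HBase conf"
        System.out.println("Setting " + name + " to " + value);
    }

    public static void main(String[] args) throws Exception {
        Path testData = Files.createTempDirectory("test-data");
        Map<String, String> conf = new HashMap<>();
        for (String name : new String[] {
                "test.cache.data", "hadoop.tmp.dir", "hadoop.log.dir",
                "mapreduce.cluster.local.dir", "dfs.journalnode.edits.dir"}) {
            redirect(conf, testData, name);
        }
    }
}
```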
2024-11-21T00:28:35,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:28:35,234 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:28:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:28:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:28:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:28:35,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:35,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:35,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:28:35,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/nfs.dump.dir in system 
properties and HBase conf 2024-11-21T00:28:35,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:28:35,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:35,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:28:35,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:28:35,720 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:35,724 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:35,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:35,768 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:35,769 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:28:35,781 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:35,784 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a42090{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:35,785 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@125c1a64{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:35,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7afaa662{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/java.io.tmpdir/jetty-localhost-36023-hadoop-hdfs-3_4_1-tests_jar-_-any-7541286524549239236/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:35,946 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3aed97b0{HTTP/1.1, (http/1.1)}{localhost:36023} 2024-11-21T00:28:35,946 INFO [Time-limited test {}] server.Server(415): Started @650496ms 2024-11-21T00:28:36,570 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:36,575 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:36,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:36,585 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:36,585 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:28:36,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@684bdb27{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:36,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a59e830{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:36,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4eee926{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/java.io.tmpdir/jetty-localhost-46287-hadoop-hdfs-3_4_1-tests_jar-_-any-3226469797578648180/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:36,753 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6afe60a5{HTTP/1.1, (http/1.1)}{localhost:46287} 2024-11-21T00:28:36,753 INFO [Time-limited test {}] server.Server(415): Started @651302ms 2024-11-21T00:28:36,756 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:28:37,884 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:37,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,889 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,977 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,978 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,979 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:37,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:38,005 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:38,076 WARN [Thread-4224 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/data/data1/current/BP-1072873683-172.17.0.2-1732148915257/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:38,091 WARN [Thread-4225 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/data/data2/current/BP-1072873683-172.17.0.2-1732148915257/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:38,205 WARN [Thread-4211 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:28:38,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x732d6f719db7387e with lease ID 0xc92efdb56233ec22: Processing first storage report for DS-5f19f0a1-22b4-46e6-b52d-b3a6faf3c326 from datanode DatanodeRegistration(127.0.0.1:39979, datanodeUuid=fcdd280f-a78d-495b-91cf-aabbfdf73eb6, infoPort=32995, infoSecurePort=0, ipcPort=46367, storageInfo=lv=-57;cid=testClusterID;nsid=1191560752;c=1732148915257) 2024-11-21T00:28:38,217 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x732d6f719db7387e with lease ID 0xc92efdb56233ec22: from storage DS-5f19f0a1-22b4-46e6-b52d-b3a6faf3c326 node DatanodeRegistration(127.0.0.1:39979, datanodeUuid=fcdd280f-a78d-495b-91cf-aabbfdf73eb6, infoPort=32995, infoSecurePort=0, ipcPort=46367, storageInfo=lv=-57;cid=testClusterID;nsid=1191560752;c=1732148915257), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:38,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x732d6f719db7387e with lease ID 0xc92efdb56233ec22: Processing first storage report for DS-7c519edb-1190-43ed-b01c-9d6ce2d3f138 from datanode DatanodeRegistration(127.0.0.1:39979, datanodeUuid=fcdd280f-a78d-495b-91cf-aabbfdf73eb6, infoPort=32995, infoSecurePort=0, ipcPort=46367, storageInfo=lv=-57;cid=testClusterID;nsid=1191560752;c=1732148915257) 2024-11-21T00:28:38,217 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x732d6f719db7387e with lease ID 0xc92efdb56233ec22: from storage DS-7c519edb-1190-43ed-b01c-9d6ce2d3f138 node DatanodeRegistration(127.0.0.1:39979, datanodeUuid=fcdd280f-a78d-495b-91cf-aabbfdf73eb6, infoPort=32995, infoSecurePort=0, ipcPort=46367, storageInfo=lv=-57;cid=testClusterID;nsid=1191560752;c=1732148915257), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:38,288 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1 2024-11-21T00:28:38,288 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:38,290 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:38,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:28:38,349 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616 with version=8 2024-11-21T00:28:38,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/hbase-staging 2024-11-21T00:28:38,370 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:38,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:38,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:38,370 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:38,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:38,370 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:38,370 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:28:38,371 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:38,376 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42061 2024-11-21T00:28:38,378 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42061 connecting to ZooKeeper ensemble=127.0.0.1:62591 2024-11-21T00:28:38,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420610x0, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:38,545 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42061-0x1015acae7810000 connected 2024-11-21T00:28:38,682 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:38,683 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:38,692 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on znode that does not yet exist, /0748125295/running 2024-11-21T00:28:38,693 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616, hbase.cluster.distributed=false 2024-11-21T00:28:38,694 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on znode that does not yet exist, /0748125295/acl 2024-11-21T00:28:38,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42061 2024-11-21T00:28:38,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42061 2024-11-21T00:28:38,733 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42061 2024-11-21T00:28:38,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42061 2024-11-21T00:28:38,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42061 2024-11-21T00:28:38,768 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:38,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:38,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:38,768 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:38,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:38,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:38,768 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:28:38,768 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:38,769 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45589 2024-11-21T00:28:38,770 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45589 connecting to ZooKeeper ensemble=127.0.0.1:62591 2024-11-21T00:28:38,771 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-21T00:28:38,773 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:38,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:455890x0, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:38,785 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:455890x0, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on znode that does not yet exist, /0748125295/running 2024-11-21T00:28:38,785 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:28:38,788 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45589-0x1015acae7810001 connected 2024-11-21T00:28:38,800 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:28:38,801 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on znode that does not yet exist, /0748125295/master 2024-11-21T00:28:38,802 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on znode that does not yet exist, /0748125295/acl 2024-11-21T00:28:38,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45589 2024-11-21T00:28:38,840 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45589 2024-11-21T00:28:38,848 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45589 2024-11-21T00:28:38,852 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45589 2024-11-21T00:28:38,860 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45589 2024-11-21T00:28:38,876 DEBUG [M:0;5ed4808ef0e6:42061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:42061 2024-11-21T00:28:38,881 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /0748125295/backup-masters/5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:38,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295/backup-masters 2024-11-21T00:28:38,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295/backup-masters 2024-11-21T00:28:38,890 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on existing znode=/0748125295/backup-masters/5ed4808ef0e6,42061,1732148918370 
2024-11-21T00:28:38,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0748125295/master 2024-11-21T00:28:38,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:38,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:38,899 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on existing znode=/0748125295/master 2024-11-21T00:28:38,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /0748125295/backup-masters/5ed4808ef0e6,42061,1732148918370 from backup master directory 2024-11-21T00:28:38,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295/backup-masters 2024-11-21T00:28:38,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/0748125295/backup-masters/5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:38,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295/backup-masters 2024-11-21T00:28:38,911 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:28:38,911 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:38,929 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/hbase.id] with ID: d5ea2a7c-2130-4fbd-80af-0ed435ca6493 2024-11-21T00:28:38,929 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/.tmp/hbase.id 2024-11-21T00:28:38,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:28:38,964 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/.tmp/hbase.id]:[hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/hbase.id] 2024-11-21T00:28:38,992 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:38,992 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:28:38,993 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:28:39,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:39,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:39,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:28:39,474 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:39,474 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:28:39,478 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:39,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:28:39,538 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/data/master/store 2024-11-21T00:28:39,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:28:39,568 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:39,569 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:39,569 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:28:39,569 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:39,569 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:39,569 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:39,569 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:39,569 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148919568Disabling compacts and flushes for region at 1732148919568Disabling writes for close at 1732148919569 (+1 ms)Writing region close event to WAL at 1732148919569Closed at 1732148919569 2024-11-21T00:28:39,569 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/data/master/store/.initializing 2024-11-21T00:28:39,570 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/WALs/5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:39,571 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:39,572 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C42061%2C1732148918370, suffix=, logDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/WALs/5ed4808ef0e6,42061,1732148918370, archiveDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/oldWALs, maxLogs=10 2024-11-21T00:28:39,598 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/WALs/5ed4808ef0e6,42061,1732148918370/5ed4808ef0e6%2C42061%2C1732148918370.1732148919572, exclude list is [], retry=0 2024-11-21T00:28:39,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39979,DS-5f19f0a1-22b4-46e6-b52d-b3a6faf3c326,DISK] 2024-11-21T00:28:39,608 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/WALs/5ed4808ef0e6,42061,1732148918370/5ed4808ef0e6%2C42061%2C1732148918370.1732148919572 2024-11-21T00:28:39,624 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32995:32995)] 2024-11-21T00:28:39,624 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:39,625 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:39,625 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,625 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:28:39,627 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:39,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,628 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 
columnFamilyName proc 2024-11-21T00:28:39,628 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:39,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:28:39,639 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:39,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:28:39,643 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:39,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:39,645 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,648 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,649 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,653 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:39,654 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:39,664 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:39,664 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68199542, jitterRate=0.016252368688583374}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:39,664 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148919625Initializing all the Stores at 1732148919625Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148919625Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE 
=> '65536 B (64KB)'} at 1732148919626 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148919626Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148919626Cleaning up temporary data from old regions at 1732148919652 (+26 ms)Region opened successfully at 1732148919664 (+12 ms) 2024-11-21T00:28:39,674 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:28:39,677 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bc61ae8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:39,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:28:39,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:28:39,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:28:39,678 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:28:39,679 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:28:39,679 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:28:39,679 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:28:39,694 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:28:39,694 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Unable to get data of znode /0748125295/balancer because node does not exist (not necessarily an error) 2024-11-21T00:28:39,706 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0748125295/balancer already deleted, retry=false 2024-11-21T00:28:39,706 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:28:39,714 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Unable to get data of znode /0748125295/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:28:39,728 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0748125295/normalizer already deleted, retry=false 2024-11-21T00:28:39,728 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:28:39,733 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Unable to get data of znode /0748125295/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:28:39,740 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0748125295/switch/split already deleted, retry=false 2024-11-21T00:28:39,752 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Unable to get data of znode /0748125295/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:28:39,762 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0748125295/switch/merge already deleted, retry=false 2024-11-21T00:28:39,771 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Unable to get data of znode /0748125295/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:28:39,782 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /0748125295/snapshot-cleanup already deleted, retry=false 2024-11-21T00:28:39,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0748125295/running 2024-11-21T00:28:39,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:39,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/0748125295/running 2024-11-21T00:28:39,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, 
quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:39,894 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,42061,1732148918370, sessionid=0x1015acae7810000, setting cluster-up flag (Was=false) 2024-11-21T00:28:39,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:39,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:40,035 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0748125295/flush-table-proc/acquired, /0748125295/flush-table-proc/reached, /0748125295/flush-table-proc/abort 2024-11-21T00:28:40,037 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:40,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:40,069 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:40,098 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /0748125295/online-snapshot/acquired, /0748125295/online-snapshot/reached, /0748125295/online-snapshot/abort 2024-11-21T00:28:40,101 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:40,135 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:28:40,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:40,139 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:28:40,139 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-21T00:28:40,140 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,42061,1732148918370 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:40,146 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,196 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:40,196 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:28:40,198 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,198 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:28:40,204 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(746): ClusterId : d5ea2a7c-2130-4fbd-80af-0ed435ca6493 2024-11-21T00:28:40,204 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:28:40,215 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:28:40,215 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:28:40,220 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148950220 2024-11-21T00:28:40,220 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:28:40,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:28:40,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:28:40,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:28:40,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:28:40,221 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:28:40,225 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:28:40,226 DEBUG [RS:0;5ed4808ef0e6:45589 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd3f0d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:40,231 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:40,240 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:28:40,240 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:28:40,240 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:28:40,240 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:28:40,265 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:28:40,266 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:28:40,266 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148920266,5,FailOnTimeoutGroup] 2024-11-21T00:28:40,271 DEBUG [RS:0;5ed4808ef0e6:45589 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:45589 2024-11-21T00:28:40,272 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148920266,5,FailOnTimeoutGroup] 2024-11-21T00:28:40,272 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,272 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:28:40,272 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,272 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,274 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:28:40,274 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:28:40,274 DEBUG [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:28:40,275 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,42061,1732148918370 with port=45589, startcode=1732148918767 2024-11-21T00:28:40,275 DEBUG [RS:0;5ed4808ef0e6:45589 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:28:40,289 INFO [HMaster-EventLoopGroup-43-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45423, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.20 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:28:40,289 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42061 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:40,289 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42061 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:40,291 DEBUG [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616 2024-11-21T00:28:40,291 DEBUG [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45307 2024-11-21T00:28:40,291 DEBUG [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:28:40,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295/rs 2024-11-21T00:28:40,307 DEBUG [RS:0;5ed4808ef0e6:45589 {}] zookeeper.ZKUtil(111): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Set watcher on existing znode=/0748125295/rs/5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:40,307 WARN [RS:0;5ed4808ef0e6:45589 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
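The reportForDuty and "Registering regionserver=..." entries above identify servers with the comma-separated form host,port,startcode (for example 5ed4808ef0e6,45589,1732148918767). HBase ships its own ServerName class for this format; the dependency-free sketch below only shows how the three fields line up, and the class name ServerNameSplit is a label invented for this example.

// Minimal sketch: splitting a "host,port,startcode" server name as seen in the entries above.
public class ServerNameSplit {
    public static void main(String[] args) {
        String serverName = "5ed4808ef0e6,45589,1732148918767"; // value taken from the log above
        String[] parts = serverName.split(",");
        String host = parts[0];
        int port = Integer.parseInt(parts[1]);
        long startcode = Long.parseLong(parts[2]);
        System.out.println(host + ":" + port + " started at " + startcode);
    }
}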
2024-11-21T00:28:40,307 INFO [RS:0;5ed4808ef0e6:45589 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:40,307 DEBUG [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/WALs/5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:40,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:28:40,337 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,45589,1732148918767] 2024-11-21T00:28:40,377 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:28:40,392 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:28:40,405 INFO [RS:0;5ed4808ef0e6:45589 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:28:40,405 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,416 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:28:40,417 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:28:40,417 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
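The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is 95% of the limit. The arithmetic check below only reproduces that ratio from the logged numbers; the 0.95 fraction is an assumption consistent with 836/880, and the exact configuration keys behind these values are not shown in this log.

// Arithmetic check for the MemStoreFlusher entry above.
public class MemStoreLimits {
    public static void main(String[] args) {
        double globalLimitMB = 880.0;   // globalMemStoreLimit from the log
        double lowMarkFraction = 0.95;  // assumed fraction, consistent with 836/880
        double lowMarkMB = globalLimitMB * lowMarkFraction;
        System.out.printf("low mark = %.0f M%n", lowMarkMB); // prints: low mark = 836 M
    }
}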
2024-11-21T00:28:40,417 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,417 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,417 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,417 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,417 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,417 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:40,418 DEBUG [RS:0;5ed4808ef0e6:45589 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:40,440 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,440 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,440 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,440 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
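Each "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." entry above describes a small, named, bounded thread pool dedicated to one event type. The sketch below is a rough analogue using plain java.util.concurrent, not HBase's internal ExecutorService wrapper; the pool shown mirrors the corePoolSize=1, maxPoolSize=1 shape of RS_OPEN_REGION, and the class and task are invented for illustration.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Rough analogue of one of the bounded, single-purpose executors listed above.
public class BoundedExecutorSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1,                       // corePoolSize=1, maxPoolSize=1, as for RS_OPEN_REGION
            60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());
        openRegionPool.submit(() -> System.out.println("would handle an open-region event here"));
        openRegionPool.shutdown();
    }
}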
2024-11-21T00:28:40,440 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,440 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,45589,1732148918767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:40,471 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:28:40,471 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,45589,1732148918767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,471 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,472 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.Replication(171): 5ed4808ef0e6,45589,1732148918767 started 2024-11-21T00:28:40,491 INFO [RS:0;5ed4808ef0e6:45589 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:40,491 INFO [RS:0;5ed4808ef0e6:45589 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,45589,1732148918767, RpcServer on 5ed4808ef0e6/172.17.0.2:45589, sessionid=0x1015acae7810001 2024-11-21T00:28:40,491 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:28:40,491 DEBUG [RS:0;5ed4808ef0e6:45589 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:40,492 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,45589,1732148918767' 2024-11-21T00:28:40,492 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0748125295/flush-table-proc/abort' 2024-11-21T00:28:40,492 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0748125295/flush-table-proc/acquired' 2024-11-21T00:28:40,493 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:28:40,493 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:28:40,493 DEBUG [RS:0;5ed4808ef0e6:45589 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:40,493 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,45589,1732148918767' 2024-11-21T00:28:40,493 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/0748125295/online-snapshot/abort' 2024-11-21T00:28:40,493 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/0748125295/online-snapshot/acquired' 2024-11-21T00:28:40,493 DEBUG [RS:0;5ed4808ef0e6:45589 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:28:40,493 INFO [RS:0;5ed4808ef0e6:45589 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:28:40,493 INFO [RS:0;5ed4808ef0e6:45589 {}] 
quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:28:40,594 INFO [RS:0;5ed4808ef0e6:45589 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:40,596 INFO [RS:0;5ed4808ef0e6:45589 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45589%2C1732148918767, suffix=, logDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/WALs/5ed4808ef0e6,45589,1732148918767, archiveDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/oldWALs, maxLogs=10 2024-11-21T00:28:40,621 DEBUG [RS:0;5ed4808ef0e6:45589 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/WALs/5ed4808ef0e6,45589,1732148918767/5ed4808ef0e6%2C45589%2C1732148918767.1732148920597, exclude list is [], retry=0 2024-11-21T00:28:40,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39979,DS-5f19f0a1-22b4-46e6-b52d-b3a6faf3c326,DISK] 2024-11-21T00:28:40,646 INFO [RS:0;5ed4808ef0e6:45589 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/WALs/5ed4808ef0e6,45589,1732148918767/5ed4808ef0e6%2C45589%2C1732148918767.1732148920597 2024-11-21T00:28:40,652 DEBUG [RS:0;5ed4808ef0e6:45589 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32995:32995)] 2024-11-21T00:28:40,722 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:28:40,723 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616 2024-11-21T00:28:40,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:28:40,750 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:40,760 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:40,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:40,762 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:40,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:40,763 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:40,763 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:40,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:40,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:40,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:40,775 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:40,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:40,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:40,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:40,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:40,783 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/hbase/meta/1588230740 2024-11-21T00:28:40,783 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/hbase/meta/1588230740 2024-11-21T00:28:40,784 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:40,784 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:40,784 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:40,786 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:40,793 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:40,794 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66855466, jitterRate=-0.0037759244441986084}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:40,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148920750Initializing all the Stores at 1732148920751 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148920751Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148920760 (+9 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148920760Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148920760Cleaning up temporary data from old regions at 1732148920784 (+24 ms)Region opened successfully at 1732148920795 (+11 ms) 2024-11-21T00:28:40,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:40,795 INFO [PEWorker-1 {}] 
regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:40,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:40,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:40,795 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:40,796 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:40,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148920795Disabling compacts and flushes for region at 1732148920795Disabling writes for close at 1732148920795Writing region close event to WAL at 1732148920796 (+1 ms)Closed at 1732148920796 2024-11-21T00:28:40,797 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:40,797 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:28:40,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:28:40,799 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:40,800 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:28:40,951 DEBUG [5ed4808ef0e6:42061 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:28:40,951 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:40,952 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,45589,1732148918767, state=OPENING 2024-11-21T00:28:40,972 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:28:40,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:40,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/0748125295 2024-11-21T00:28:40,984 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0748125295/meta-region-server: CHANGED 2024-11-21T00:28:40,984 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/0748125295/meta-region-server: CHANGED 2024-11-21T00:28:40,984 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:40,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45589,1732148918767}] 2024-11-21T00:28:41,153 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:41,157 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50809, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:28:41,190 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:28:41,190 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:41,190 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:28:41,192 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C45589%2C1732148918767.meta, suffix=.meta, logDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/WALs/5ed4808ef0e6,45589,1732148918767, archiveDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/oldWALs, maxLogs=10 2024-11-21T00:28:41,222 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/WALs/5ed4808ef0e6,45589,1732148918767/5ed4808ef0e6%2C45589%2C1732148918767.meta.1732148921193.meta, exclude list is [], retry=0 2024-11-21T00:28:41,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39979,DS-5f19f0a1-22b4-46e6-b52d-b3a6faf3c326,DISK] 2024-11-21T00:28:41,250 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/WALs/5ed4808ef0e6,45589,1732148918767/5ed4808ef0e6%2C45589%2C1732148918767.meta.1732148921193.meta 2024-11-21T00:28:41,250 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:32995:32995)] 2024-11-21T00:28:41,251 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:41,251 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:41,251 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:41,251 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:28:41,251 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:28:41,252 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:28:41,252 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:41,252 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:28:41,252 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:28:41,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:41,254 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:41,254 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:41,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:41,255 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:41,255 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:41,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:41,257 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:41,257 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:41,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:41,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:41,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,258 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:41,259 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:41,261 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/hbase/meta/1588230740 2024-11-21T00:28:41,262 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/hbase/meta/1588230740 2024-11-21T00:28:41,263 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:41,263 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:41,264 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
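The FlushLargeStoresPolicy entry just above falls back to "region.getMemStoreFlushHeapSize/# of families (32.0 M)" because no per-family lower bound is configured. hbase:meta has four column families (info, ns, rep_barrier, table), so a 128 MB region flush size divided by 4 gives the 32 MB figure, matching the flushSizeLowerBound=33554432 reported in the open journal a few entries below. The 128 MB flush size is an assumption consistent with that result, not a value printed in this log.

// Arithmetic behind the FlushLargeStoresPolicy fallback logged above.
public class FlushLowerBound {
    public static void main(String[] args) {
        long memStoreFlushSize = 128L * 1024 * 1024; // assumed region memstore flush size
        int numFamilies = 4;                         // info, ns, rep_barrier, table
        long lowerBound = memStoreFlushSize / numFamilies;
        System.out.println(lowerBound);              // 33554432 (32 MB)
    }
}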
2024-11-21T00:28:41,267 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:41,268 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73554598, jitterRate=0.09604892134666443}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:41,268 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:28:41,268 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148921252Writing region info on filesystem at 1732148921252Initializing all the Stores at 1732148921253 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148921253Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148921253Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148921253Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148921253Cleaning up temporary data from old regions at 1732148921263 (+10 ms)Running coprocessor post-open hooks at 1732148921268 (+5 ms)Region opened successfully at 1732148921268 2024-11-21T00:28:41,270 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148921152 2024-11-21T00:28:41,273 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:28:41,273 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:28:41,274 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:41,275 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,45589,1732148918767, state=OPEN 2024-11-21T00:28:41,320 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45589-0x1015acae7810001, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0748125295/meta-region-server 2024-11-21T00:28:41,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42061-0x1015acae7810000, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/0748125295/meta-region-server 2024-11-21T00:28:41,321 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0748125295/meta-region-server: CHANGED 2024-11-21T00:28:41,321 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /0748125295/meta-region-server: CHANGED 2024-11-21T00:28:41,321 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:41,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:28:41,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,45589,1732148918767 in 337 msec 2024-11-21T00:28:41,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:28:41,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 526 msec 2024-11-21T00:28:41,327 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:41,327 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:28:41,329 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:41,329 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45589,1732148918767, seqNum=-1] 2024-11-21T00:28:41,329 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:41,330 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-44-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33893, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:41,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2020 sec 2024-11-21T00:28:41,342 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148921342, completionTime=-1 2024-11-21T00:28:41,342 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:28:41,343 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148981345 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732149041345 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42061,1732148918370-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42061,1732148918370-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42061,1732148918370-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:42061, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:41,345 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:41,347 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:28:41,352 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:41,356 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.445sec 2024-11-21T00:28:41,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:28:41,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:28:41,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:28:41,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
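With the master reporting "completed initialization" above, the entries that follow show the test client building a connection, resolving the connection registry, and learning the cluster id d5ea2a7c-2130-4fbd-80af-0ed435ca6493. The sketch below is a generic client-side equivalent using the standard HBase client API, not the test's actual code; the ZooKeeper quorum and client port are the 127.0.0.1:62591 values from this log and would differ elsewhere.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side connection step recorded in the following entries.
public class ConnectSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "62591");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
    }
}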
2024-11-21T00:28:41,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:28:41,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42061,1732148918370-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:41,357 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42061,1732148918370-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:28:41,378 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:28:41,378 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:28:41,378 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,42061,1732148918370-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:41,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d4d26fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:41,416 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42061,-1 for getting cluster id 2024-11-21T00:28:41,417 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:41,418 DEBUG [HMaster-EventLoopGroup-43-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd5ea2a7c-2130-4fbd-80af-0ed435ca6493' 2024-11-21T00:28:41,418 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:41,418 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d5ea2a7c-2130-4fbd-80af-0ed435ca6493" 2024-11-21T00:28:41,418 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75022e68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:41,418 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42061,-1] 2024-11-21T00:28:41,418 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:41,418 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:41,419 INFO [HMaster-EventLoopGroup-43-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48392, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:41,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29e498b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:41,420 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:41,421 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,45589,1732148918767, seqNum=-1] 2024-11-21T00:28:41,422 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:41,423 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-44-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51684, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:41,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:41,425 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:62591 2024-11-21T00:28:41,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:62591, baseZNode=/0748125295 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:41,487 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:41,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015acae7810002 connected 2024-11-21T00:28:41,490 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:41,490 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@35f325aa 2024-11-21T00:28:41,490 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:41,491 INFO [HMaster-EventLoopGroup-43-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48394, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:41,492 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:41,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:28:41,495 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:41,495 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:28:41,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:41,496 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:41,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:28:41,534 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 238f602fab681072bd88b598de737ec1, NAME => 'test,,1732148921491.238f602fab681072bd88b598de737ec1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616 2024-11-21T00:28:41,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39979 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:28:41,579 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148921491.238f602fab681072bd88b598de737ec1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:41,579 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing 238f602fab681072bd88b598de737ec1, disabling compactions & flushes 2024-11-21T00:28:41,579 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148921491.238f602fab681072bd88b598de737ec1. 
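For orientation, the create 'test', {NAME => 'f', ...} request recorded above is what an HBase client produces when it submits a table descriptor with three column families. The following is a minimal sketch, not the test's actual code: the connection setup and main method are assumptions, but the family layout (f and f1 replicated with REPLICATION_SCOPE 1, norep kept local with scope 0, one version each) mirrors the descriptor in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes site config on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
          // 'f' and 'f1' are replicated (REPLICATION_SCOPE => '1' in the logged descriptor)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
              .setMaxVersions(1).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
              .setMaxVersions(1).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          // 'norep' stays local (REPLICATION_SCOPE => '0'), so it is excluded from replication
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
              .setMaxVersions(1).setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
          .build();
      admin.createTable(desc); // blocks until the CreateTableProcedure finishes
    }
  }
}
```

The blocking createTable call is served by the procedure whose state transitions appear above (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS), which matches the repeated "Checking to see if procedure is done pid=4" polls in the log.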
2024-11-21T00:28:41,579 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148921491.238f602fab681072bd88b598de737ec1. 2024-11-21T00:28:41,579 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148921491.238f602fab681072bd88b598de737ec1. after waiting 0 ms 2024-11-21T00:28:41,579 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148921491.238f602fab681072bd88b598de737ec1. 2024-11-21T00:28:41,579 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148921491.238f602fab681072bd88b598de737ec1. 2024-11-21T00:28:41,579 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for 238f602fab681072bd88b598de737ec1: Waiting for close lock at 1732148921579Disabling compacts and flushes for region at 1732148921579Disabling writes for close at 1732148921579Writing region close event to WAL at 1732148921579Closed at 1732148921579 2024-11-21T00:28:41,581 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:41,582 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148921491.238f602fab681072bd88b598de737ec1.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148921581"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148921581"}]},"ts":"1732148921581"} 2024-11-21T00:28:41,584 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:28:41,586 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:41,586 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148921586"}]},"ts":"1732148921586"} 2024-11-21T00:28:41,587 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:28:41,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=238f602fab681072bd88b598de737ec1, ASSIGN}] 2024-11-21T00:28:41,589 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=238f602fab681072bd88b598de737ec1, ASSIGN 2024-11-21T00:28:41,592 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=238f602fab681072bd88b598de737ec1, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,45589,1732148918767; forceNewPlan=false, retain=false 2024-11-21T00:28:41,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:41,745 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=238f602fab681072bd88b598de737ec1, 
regionState=OPENING, regionLocation=5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:41,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=238f602fab681072bd88b598de737ec1, ASSIGN because future has completed 2024-11-21T00:28:41,765 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 238f602fab681072bd88b598de737ec1, server=5ed4808ef0e6,45589,1732148918767}] 2024-11-21T00:28:41,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:41,936 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148921491.238f602fab681072bd88b598de737ec1. 2024-11-21T00:28:41,936 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 238f602fab681072bd88b598de737ec1, NAME => 'test,,1732148921491.238f602fab681072bd88b598de737ec1.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:41,936 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:41,936 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
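The two "System coprocessor ... loaded" lines above show that, besides the built-in ReplicationObserver, the test's CoprocessorCounter observer is attached to every region as it opens. Below is a minimal sketch of how a region observer is typically wired in through configuration; the helper name is illustrative, and the exact key the test uses is an assumption inferred from the "System coprocessor" wording.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class CoprocessorWiringSketch {
  /** Returns a configuration that loads the given observer on every region. */
  public static Configuration withRegionObserver(Class<?> observerClass) {
    Configuration conf = HBaseConfiguration.create();
    // Classes listed under this key are loaded as "System coprocessor"s on each region,
    // which is how an observer ends up in the region-open log lines above.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, observerClass.getName());
    return conf;
  }
}
```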
2024-11-21T00:28:41,937 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,937 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148921491.238f602fab681072bd88b598de737ec1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:41,937 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,937 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,952 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,956 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 238f602fab681072bd88b598de737ec1 columnFamilyName f 2024-11-21T00:28:41,956 DEBUG [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,958 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] regionserver.HStore(327): Store=238f602fab681072bd88b598de737ec1/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:41,958 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,960 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 238f602fab681072bd88b598de737ec1 columnFamilyName f1 2024-11-21T00:28:41,960 DEBUG [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,960 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] regionserver.HStore(327): Store=238f602fab681072bd88b598de737ec1/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:41,960 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,961 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 238f602fab681072bd88b598de737ec1 columnFamilyName norep 2024-11-21T00:28:41,961 DEBUG [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:41,961 INFO [StoreOpener-238f602fab681072bd88b598de737ec1-1 {}] regionserver.HStore(327): Store=238f602fab681072bd88b598de737ec1/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:41,968 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,969 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/default/test/238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,969 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/default/test/238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,970 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,970 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,971 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:28:41,971 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,985 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/data/default/test/238f602fab681072bd88b598de737ec1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:41,985 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 238f602fab681072bd88b598de737ec1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65969616, jitterRate=-0.016976118087768555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:41,985 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 238f602fab681072bd88b598de737ec1 2024-11-21T00:28:41,986 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 238f602fab681072bd88b598de737ec1: Running coprocessor pre-open hook at 1732148921937Writing region info on filesystem at 1732148921937Initializing all the Stores at 1732148921937Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148921937Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148921952 (+15 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148921952Cleaning up temporary data from old regions at 1732148921970 (+18 ms)Running coprocessor post-open hooks at 1732148921986 (+16 ms)Region opened successfully at 1732148921986 2024-11-21T00:28:41,987 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148921491.238f602fab681072bd88b598de737ec1., pid=6, 
masterSystemTime=1732148921917 2024-11-21T00:28:41,994 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148921491.238f602fab681072bd88b598de737ec1. 2024-11-21T00:28:41,994 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148921491.238f602fab681072bd88b598de737ec1. 2024-11-21T00:28:41,994 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=238f602fab681072bd88b598de737ec1, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,45589,1732148918767 2024-11-21T00:28:41,995 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 238f602fab681072bd88b598de737ec1, server=5ed4808ef0e6,45589,1732148918767 because future has completed 2024-11-21T00:28:41,998 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42061 {}] assignment.AssignmentManager(1535): Unable to acquire lock for regionNode state=OPEN, location=5ed4808ef0e6,45589,1732148918767, table=test, region=238f602fab681072bd88b598de737ec1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-21T00:28:42,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:28:42,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 238f602fab681072bd88b598de737ec1, server=5ed4808ef0e6,45589,1732148918767 in 251 msec 2024-11-21T00:28:42,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:28:42,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=238f602fab681072bd88b598de737ec1, ASSIGN in 430 msec 2024-11-21T00:28:42,025 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:42,025 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148922025"}]},"ts":"1732148922025"} 2024-11-21T00:28:42,028 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:28:42,029 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:42,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 537 msec 2024-11-21T00:28:42,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:42,128 INFO [RPCClient-NioEventLoopGroup-4-12 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:28:42,128 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f090ce8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:42,128 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42061,-1 for getting cluster id 2024-11-21T00:28:42,128 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:42,129 DEBUG [HMaster-EventLoopGroup-43-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd5ea2a7c-2130-4fbd-80af-0ed435ca6493' 2024-11-21T00:28:42,129 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:42,129 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d5ea2a7c-2130-4fbd-80af-0ed435ca6493" 2024-11-21T00:28:42,129 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b757495, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:42,129 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42061,-1] 2024-11-21T00:28:42,130 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:42,130 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:42,130 INFO [HMaster-EventLoopGroup-43-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42288, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:42,131 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f0b8156, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:42,131 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:42,132 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:42,132 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@18df6b93 2024-11-21T00:28:42,132 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:42,133 INFO [HMaster-EventLoopGroup-43-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42292, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:42,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, 
config=clusterKey=hbase+rpc://5ed4808ef0e6:42061,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:28:42,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:28:42,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:42,136 DEBUG [PEWorker-2 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:42061' 2024-11-21T00:28:42,137 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@280768af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:42,137 DEBUG [PEWorker-2 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,42061,-1 for getting cluster id 2024-11-21T00:28:42,137 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:42,138 DEBUG [HMaster-EventLoopGroup-43-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd5ea2a7c-2130-4fbd-80af-0ed435ca6493' 2024-11-21T00:28:42,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:42,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d5ea2a7c-2130-4fbd-80af-0ed435ca6493" 2024-11-21T00:28:42,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ac6d671, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:42,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,42061,-1] 2024-11-21T00:28:42,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:42,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:42,139 INFO [HMaster-EventLoopGroup-43-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42302, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:42,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cf65ae7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:42,140 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:42,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-2 {}] 
client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,42061,1732148918370 2024-11-21T00:28:42,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1b7beae 2024-11-21T00:28:42,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-44-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:42,141 INFO [HMaster-EventLoopGroup-43-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42304, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:42,142 INFO [PEWorker-2 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-2. 2024-11-21T00:28:42,142 DEBUG [PEWorker-2 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:28:42,142 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:42,142 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:42,142 WARN [PEWorker-2 {}] replication.ModifyPeerProcedure(190): org.apache.hadoop.hbase.master.replication.AddPeerProcedure failed to call pre CP hook or the pre check is failed for peer 1, mark the procedure as failure and give up org.apache.hadoop.hbase.DoNotRetryIOException: Invalid cluster key: hbase+rpc://5ed4808ef0e6:42061, should not replicate to itself for HBaseInterClusterReplicationEndpoint at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:456) ~[classes/:?] 
at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] 2024-11-21T00:28:42,151 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:28:42,153 ERROR [PEWorker-2 {}] procedure2.ProcedureExecutor(1697): Root Procedure pid=7, state=FAILED, hasLock=true, exception=org.apache.hadoop.hbase.DoNotRetryIOException via master-add-peer:org.apache.hadoop.hbase.DoNotRetryIOException: Invalid cluster key: hbase+rpc://5ed4808ef0e6:42061, should not replicate to itself for HBaseInterClusterReplicationEndpoint; org.apache.hadoop.hbase.master.replication.AddPeerProcedure does not support rollback but the execution failed and try to rollback, code bug? org.apache.hadoop.hbase.procedure2.RemoteProcedureException: org.apache.hadoop.hbase.DoNotRetryIOException: Invalid cluster key: hbase+rpc://5ed4808ef0e6:42061, should not replicate to itself for HBaseInterClusterReplicationEndpoint at org.apache.hadoop.hbase.procedure2.Procedure.setFailure(Procedure.java:765) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:192) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) ~[hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] Caused by: org.apache.hadoop.hbase.DoNotRetryIOException: Invalid cluster key: hbase+rpc://5ed4808ef0e6:42061, should not replicate to itself for HBaseInterClusterReplicationEndpoint at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:456) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) ~[classes/:?] at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) ~[classes/:?] ... 8 more 2024-11-21T00:28:42,154 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1707): Rolled back pid=7, state=ROLLEDBACK, hasLock=true, exception=org.apache.hadoop.hbase.DoNotRetryIOException via master-add-peer:org.apache.hadoop.hbase.DoNotRetryIOException: Invalid cluster key: hbase+rpc://5ed4808ef0e6:42061, should not replicate to itself for HBaseInterClusterReplicationEndpoint; org.apache.hadoop.hbase.master.replication.AddPeerProcedure exec-time=20 msec 2024-11-21T00:28:42,154 WARN [PEWorker-2 {}] procedure2.ProcedureExecutor(2061): Usually this should not happen, we will release the lock before if the procedure is finished, even if the holdLock is true, arrive here means we have some holes where we do not release the lock. And the releaseLock below may fail since the procedure may have already been deleted from the procedure store. 2024-11-21T00:28:42,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:28:42,252 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3009): Operation: ADD_REPLICATION_PEER, peerId: 1 failed with Invalid cluster key: hbase+rpc://5ed4808ef0e6:42061, should not replicate to itself for HBaseInterClusterReplicationEndpoint 2024-11-21T00:28:42,252 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
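The rolled-back pid=7 above is the master rejecting a replication peer whose cluster key resolves back to the local cluster. As a hedged client-side sketch (the helper and its parameters are illustrative, not TestMasterReplication's code), the request that drives this path looks roughly like:

```java
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  /** Illustrative helper: peerKey is whatever cluster key / connection URI the caller supplies. */
  static void addPeer(Admin admin, String peerId, String peerKey) throws Exception {
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey(peerKey)          // e.g. "hbase+rpc://5ed4808ef0e6:42061" in the log above
        .setReplicateAllUserTables(true) // matches replicateAllUserTables=true in the logged peer config
        .build();
    // ReplicationPeerManager.checkClusterKey (visible in the stack trace above) resolves the key
    // to a cluster id; because it matches the local cluster, the call fails with
    // DoNotRetryIOException("... should not replicate to itself ...") and the AddPeerProcedure
    // is marked FAILED and rolled back, exactly as logged for peer id 1.
    admin.addReplicationPeer(peerId, peerConfig);
  }
}
```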
2024-11-21T00:28:42,252 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:616) at org.apache.hadoop.hbase.replication.TestMasterReplication.testLoopedReplication(TestMasterReplication.java:181) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.ExpectException.evaluate(ExpectException.java:19) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:28:42,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:42,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:42,252 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
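The org.junit.internal.runners.statements.ExpectException frame in the call stack above indicates the test method declares this failure as its expected outcome (JUnit 4's expected-exception form). A purely illustrative sketch of that pattern follows; the class, method body, and the exact exception type the real test declares are assumptions.

```java
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.junit.Test;

public class LoopedReplicationSketchTest {
  @Test(expected = DoNotRetryIOException.class)
  public void testLoopedReplication() throws Exception {
    // JUnit's ExpectException statement (seen in the stack above) passes the test only
    // if this call throws the declared exception type.
    addSelfReferencingPeer(); // hypothetical stand-in for the test's addPeer(...) helper
  }

  private void addSelfReferencingPeer() throws DoNotRetryIOException {
    // Placeholder for the Admin.addReplicationPeer call sketched earlier, which the master
    // rejects when the peer's cluster key points back at the same cluster.
    throw new DoNotRetryIOException("should not replicate to itself");
  }
}
```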
2024-11-21T00:28:42,285 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testLoopedReplication Thread=765 (was 599) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: HMaster-EventLoopGroup-43-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-44-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-43-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1072873683-172.17.0.2-1732148915257:blk_1073741830_1006, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 4 on default port 46367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62591) java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@1e8443a8 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: M:0;5ed4808ef0e6:42061 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:625) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@5c948a46[State = -1, empty queue] 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7b4a8b04 java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 2 on default port 46367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: org.apache.hadoop.util.JvmPauseMonitor$Monitor@c7f74f5 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148920266 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$1.run(HFileCleaner.java:254) Potentially hanging thread: IPC Server handler 1 on default port 45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: HMaster-EventLoopGroup-43-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:45307 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: StorageLocationChecker thread 0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/data/data2) java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Potentially hanging thread: PacketResponder: BP-1072873683-172.17.0.2-1732148915257:blk_1073741832_1008, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server idle connection scanner for port 46367 java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: org.apache.hadoop.util.JvmPauseMonitor$Monitor@3afe3d0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp939121496-7995 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/data/data1) java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Potentially hanging thread: pool-2270-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-44-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616-prefix:5ed4808ef0e6,45589,1732148918767.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62591 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Potentially hanging thread: IPC Server handler 3 on default port 45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: pool-2258-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62591) java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Potentially hanging thread: RS:0;5ed4808ef0e6:45589-longCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.util.StealJobQueue.take(StealJobQueue.java:101) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: Session-HouseKeeper-586ab125-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;5ed4808ef0e6:45589 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:81) app//org.apache.hadoop.hbase.util.Sleeper.sleep(Sleeper.java:64) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:906) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) java.base@17.0.11/java.security.AccessController.executePrivileged(AccessController.java:776) java.base@17.0.11/java.security.AccessController.doPrivileged(AccessController.java:399) java.base@17.0.11/javax.security.auth.Subject.doAs(Subject.java:376) app//org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) app//org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148920266 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner.consumerLoop(HFileCleaner.java:285) app//org.apache.hadoop.hbase.master.cleaner.HFileCleaner$2.run(HFileCleaner.java:269) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-2264-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.20@localhost:45307 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1072873683-172.17.0.2-1732148915257:blk_1073741834_1010, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: CacheReplicationMonitor(1973227644) java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Potentially hanging thread: qtp939121496-7999 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62591) java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Potentially hanging thread: IPC Server handler 4 on default port 45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/data/data1/current/BP-1072873683-172.17.0.2-1732148915257 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp549116892-8042 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2134620338_20 at /127.0.0.1:33390 [Receiving block BP-1072873683-172.17.0.2-1732148915257:blk_1073741834_1010] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: BP-1072873683-172.17.0.2-1732148915257 heartbeating to localhost/127.0.0.1:45307 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5749ced0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-2272-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45307 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2134620338_20 at /127.0.0.1:33314 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1412776663_20 at /127.0.0.1:33360 [Receiving block BP-1072873683-172.17.0.2-1732148915257:blk_1073741830_1006] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@7e43f981 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataNode DiskChecker thread 0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@4a20a1aa java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp939121496-7996 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 0 on default port 45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: ProcessThread(sid:0 cport:62591): java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Potentially hanging thread: qtp939121496-7997-acceptor-0@3e06664d-ServerConnector@3aed97b0{HTTP/1.1, (http/1.1)}{localhost:36023} java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp939121496-7994 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: IPC Server handler 0 on default port 46367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: IPC Server handler 3 on default port 46367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: IPC Server handler 2 on default port 45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: pool-2267-thread-1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@5d7e4644 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616-prefix:5ed4808ef0e6,45589,1732148918767 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp939121496-8000 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:45307 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Server idle connection scanner for port 45307 java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Session-HouseKeeper-5764ee9b-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45307 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp939121496-7998 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Server handler 1 on default port 46367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Potentially hanging thread: qtp549116892-8043-acceptor-0@702b7d7a-ServerConnector@6afe60a5{HTTP/1.1, (http/1.1)}{localhost:46287} java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp549116892-8045 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@30c1d1ff java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: 
RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:45307/user/jenkins/test-data/7060c0f7-a7f6-4a21-bdb6-5ee2be30a616/MasterData-prefix:5ed4808ef0e6,42061,1732148918370 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: qtp549116892-8044 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: qtp939121496-7993 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$307/0x00007f205c438000.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: 
StorageLocationChecker thread 1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 5ed4808ef0e6:42061 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.waitOnAssignQueue(AssignmentManager.java:2390) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager.processAssignQueue(AssignmentManager.java:2412) app//org.apache.hadoop.hbase.master.assignment.AssignmentManager$1.run(AssignmentManager.java:2352) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:45307 from jenkins.hfs.20 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45589 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-44-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait0(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:193) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWait(EpollEventLoop.java:304) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:368) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: pool-2259-thread-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2134620338_20 at /127.0.0.1:33370 [Receiving block BP-1072873683-172.17.0.2-1732148915257:blk_1073741832_1008] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/cluster_2f31d76f-7a48-dd10-152a-2b9968a07c2c/data/data2/current/BP-1072873683-172.17.0.2-1732148915257 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1016 (was 938) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1052 (was 993) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1816 (was 2045) 2024-11-21T00:28:42,286 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=765 is superior to 500 2024-11-21T00:28:42,309 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: replication.TestMasterReplication#testHFileMultiSlaveReplication Thread=765, OpenFileDescriptor=1016, MaxFileDescriptor=1048576, SystemLoadAverage=1052, ProcessCount=11, AvailableMemoryMB=1816 2024-11-21T00:28:42,309 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=765 is superior to 500 2024-11-21T00:28:42,330 INFO [Time-limited test {}] replication.TestMasterReplication(298): testHFileMultiSlaveReplication 2024-11-21T00:28:42,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.log.dir so I do NOT create it in target/test-data/16926423-955b-e581-6f21-4a09194a24d6 2024-11-21T00:28:42,332 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:28:42,332 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.tmp.dir so I do NOT create it in target/test-data/16926423-955b-e581-6f21-4a09194a24d6 2024-11-21T00:28:42,332 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/0ffccbe9-4e3c-0af9-c3b5-aaf4ffb6c7b1/hadoop.tmp.dir Erasing configuration value by system value. 
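The ResourceChecker block that closes above compares per-test snapshots of Thread, OpenFileDescriptor, MaxFileDescriptor, SystemLoadAverage, ProcessCount and AvailableMemoryMB, appends a "LEAK?" marker where a count grew, and warns outright when a hard limit is crossed (here Thread=765 against a limit of 500). Purely as an illustration of that before/after pattern, the following hypothetical Java sketch uses the standard JMX beans; it is not HBase's ResourceChecker, and the threshold constant is made up to mirror the log.

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.ThreadMXBean;

// Hypothetical before/after resource snapshot, in the spirit of the
// "Thread=765 is superior to 500" / "(was 938)" report above.
public class ResourceSnapshotDemo {
    static final int THREAD_LIMIT = 500; // illustrative hard limit, mirrors the log's threshold

    record Snapshot(int threads, double systemLoad) {}

    static Snapshot take() {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        return new Snapshot(threads.getThreadCount(), os.getSystemLoadAverage());
    }

    public static void main(String[] args) {
        Snapshot before = take();
        // ... the test body would run here ...
        Snapshot after = take();
        if (after.threads() > THREAD_LIMIT) {
            System.out.printf("Thread=%d is superior to %d%n", after.threads(), THREAD_LIMIT);
        }
        if (after.threads() > before.threads()) {
            System.out.printf("Thread=%d (was %d) - Thread LEAK?%n", after.threads(), before.threads());
        }
    }
}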
2024-11-21T00:28:42,332 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6 2024-11-21T00:28:42,332 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7, deleteOnExit=true 2024-11-21T00:28:42,361 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7/zookeeper_0, clientPort=62972, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-21T00:28:42,380 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62972 2024-11-21T00:28:42,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:28:42,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:28:42,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/test.cache.data in system properties and HBase conf 2024-11-21T00:28:42,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:28:42,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:28:42,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:28:42,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/mapreduce.cluster.temp.dir in system properties and HBase conf 
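The entries above record HBaseTestingUtil laying down a fresh test-data directory, starting a MiniZooKeeperCluster on clientPort=62972, and then bringing up a minicluster with numMasters=1, numRegionServers=1, numDataNodes=1 and numZkServers=1 before "STARTING DFS". A minimal sketch of that sequence from a test's point of view, assuming the usual HBaseTestingUtil/StartMiniClusterOption API and a made-up table name purely to show the cluster is usable:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.TableName;

// Minimal sketch of the startup the log records: one master, one region
// server, one datanode, one ZooKeeper server. Error handling omitted.
public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(1)
            .numZkServers(1)
            .build();
        util.startMiniCluster(option);   // starts mini DFS, ZK, master and region server
        try {
            // hypothetical table, only to exercise the running cluster
            util.createTable(TableName.valueOf("demo"), "f");
        } finally {
            util.shutdownMiniCluster();
        }
    }
}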
2024-11-21T00:28:42,381 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:28:42,381 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/nfs.dump.dir in system 
properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:42,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:28:42,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:28:42,850 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:42,869 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:42,949 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:42,949 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:42,949 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:28:42,965 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:42,973 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d3ef9ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:42,974 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28070112{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:43,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12e42a7e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/java.io.tmpdir/jetty-localhost-33505-hadoop-hdfs-3_4_1-tests_jar-_-any-6711879681400413021/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:43,153 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@175d26d5{HTTP/1.1, (http/1.1)}{localhost:33505} 2024-11-21T00:28:43,153 INFO [Time-limited test {}] server.Server(415): Started @657703ms 2024-11-21T00:28:43,352 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:28:43,817 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:43,827 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:43,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:43,858 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:43,858 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:28:43,860 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79393f7f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:43,861 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@225b0623{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:44,014 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64681ff6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/java.io.tmpdir/jetty-localhost-46455-hadoop-hdfs-3_4_1-tests_jar-_-any-9375372645854824731/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:44,015 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e3bc62f{HTTP/1.1, (http/1.1)}{localhost:46455} 2024-11-21T00:28:44,015 INFO [Time-limited test {}] server.Server(415): Started @658564ms 2024-11-21T00:28:44,016 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:28:44,222 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:28:44,742 WARN [Thread-4359 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7/data/data1/current/BP-613913888-172.17.0.2-1732148922416/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:44,744 WARN [Thread-4360 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7/data/data2/current/BP-613913888-172.17.0.2-1732148922416/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:44,836 WARN [Thread-4346 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-21T00:28:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x408fc3000d4876aa with lease ID 0xdb44740c58edc58b: Processing first storage report for DS-823beef4-9d61-40e1-b761-72a26cd5c543 from datanode DatanodeRegistration(127.0.0.1:40739, datanodeUuid=92233fbd-f731-4ba2-ab54-9380f3ce15bc, infoPort=43193, infoSecurePort=0, ipcPort=46509, storageInfo=lv=-57;cid=testClusterID;nsid=345346339;c=1732148922416) 2024-11-21T00:28:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x408fc3000d4876aa with lease ID 0xdb44740c58edc58b: from storage DS-823beef4-9d61-40e1-b761-72a26cd5c543 node DatanodeRegistration(127.0.0.1:40739, datanodeUuid=92233fbd-f731-4ba2-ab54-9380f3ce15bc, infoPort=43193, infoSecurePort=0, ipcPort=46509, storageInfo=lv=-57;cid=testClusterID;nsid=345346339;c=1732148922416), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x408fc3000d4876aa with lease ID 0xdb44740c58edc58b: Processing first storage report for DS-7a98c31b-29f8-4b41-9c72-e1bf990cd91a from datanode DatanodeRegistration(127.0.0.1:40739, datanodeUuid=92233fbd-f731-4ba2-ab54-9380f3ce15bc, infoPort=43193, infoSecurePort=0, ipcPort=46509, storageInfo=lv=-57;cid=testClusterID;nsid=345346339;c=1732148922416) 2024-11-21T00:28:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x408fc3000d4876aa with lease ID 0xdb44740c58edc58b: from storage DS-7a98c31b-29f8-4b41-9c72-e1bf990cd91a node DatanodeRegistration(127.0.0.1:40739, datanodeUuid=92233fbd-f731-4ba2-ab54-9380f3ce15bc, infoPort=43193, infoSecurePort=0, ipcPort=46509, storageInfo=lv=-57;cid=testClusterID;nsid=345346339;c=1732148922416), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:44,913 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6 2024-11-21T00:28:44,913 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:44,914 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:44,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:28:44,940 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd with version=8 2024-11-21T00:28:44,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/hbase-staging 2024-11-21T00:28:44,943 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:44,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:44,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:44,943 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:44,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:44,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:44,943 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:28:44,944 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:44,946 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40563 2024-11-21T00:28:44,947 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40563 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:45,025 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:405630x0, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:45,035 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40563-0x1015acb039c0000 connected 2024-11-21T00:28:45,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:45,166 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:45,168 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on znode that does not yet exist, /01646092936/running 2024-11-21T00:28:45,168 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd, hbase.cluster.distributed=false 2024-11-21T00:28:45,170 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on znode that does not yet exist, /01646092936/acl 2024-11-21T00:28:45,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40563 2024-11-21T00:28:45,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40563 2024-11-21T00:28:45,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=40563 2024-11-21T00:28:45,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40563 2024-11-21T00:28:45,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40563 2024-11-21T00:28:45,193 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:45,193 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38737 2024-11-21T00:28:45,194 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38737 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:45,194 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:45,196 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:45,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:387370x0, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:45,212 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:387370x0, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on znode that does not yet exist, /01646092936/running 2024-11-21T00:28:45,212 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:28:45,215 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38737-0x1015acb039c0001 connected 2024-11-21T00:28:45,216 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, 
evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:28:45,217 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on znode that does not yet exist, /01646092936/master 2024-11-21T00:28:45,218 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on znode that does not yet exist, /01646092936/acl 2024-11-21T00:28:45,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38737 2024-11-21T00:28:45,220 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38737 2024-11-21T00:28:45,224 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38737 2024-11-21T00:28:45,225 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38737 2024-11-21T00:28:45,226 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38737 2024-11-21T00:28:45,236 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:40563 2024-11-21T00:28:45,237 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /01646092936/backup-masters/5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:45,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936/backup-masters 2024-11-21T00:28:45,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936/backup-masters 2024-11-21T00:28:45,249 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on existing znode=/01646092936/backup-masters/5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:45,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:45,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01646092936/master 2024-11-21T00:28:45,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:45,260 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on existing znode=/01646092936/master 2024-11-21T00:28:45,260 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /01646092936/backup-masters/5ed4808ef0e6,40563,1732148924943 from backup master directory 2024-11-21T00:28:45,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01646092936/backup-masters/5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:45,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936/backup-masters 2024-11-21T00:28:45,270 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936/backup-masters 2024-11-21T00:28:45,270 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:28:45,270 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:45,273 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/hbase.id] with ID: 2769c2a5-6298-47aa-8583-d1e5652dbcb6 2024-11-21T00:28:45,273 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/.tmp/hbase.id 2024-11-21T00:28:45,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:28:45,678 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/.tmp/hbase.id]:[hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/hbase.id] 2024-11-21T00:28:45,689 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:45,689 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:28:45,689 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 0ms. 
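The ZKWatcher traffic above traces the active-master handoff: the master first adds itself under /01646092936/backup-masters, watches the /01646092936/master znode, creates it once it wins the race, and then deletes its own backup-masters entry (the NodeCreated and NodeDeleted events in the log) before registering as active master. The sketch below shows only that znode pattern with the plain ZooKeeper client; it is not HBase's ActiveMasterManager, it assumes the parent znodes already exist, and it skips the retry/watch logic a real contender needs when it loses the race.

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative znode dance: register as backup, try to become active, then
// remove the backup entry, matching the event sequence the log lines report.
public class ActiveMasterZnodeSketch {
    public static void main(String[] args) throws Exception {
        String base = "/01646092936";                    // baseZNode from the log
        String me = "5ed4808ef0e6,40563,1732148924943";  // server name from the log
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62972", 30000, event -> {});

        // 1. advertise as a backup master (ephemeral: disappears if the process dies)
        zk.create(base + "/backup-masters/" + me, me.getBytes(),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // 2. race for the active-master znode
        zk.create(base + "/master", me.getBytes(),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        // 3. won the race: delete our backup-masters entry (NodeDeleted in the log)
        zk.delete(base + "/backup-masters/" + me, -1);

        zk.close();
    }
}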
2024-11-21T00:28:45,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:45,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:45,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:28:45,772 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:45,773 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:28:45,773 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:45,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:28:45,779 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store 2024-11-21T00:28:45,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:28:45,784 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:45,784 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:45,784 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:45,784 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:45,785 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:45,785 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:45,785 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
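The MasterRegion entries above print the full schema of the local 'master:store' table: an in-memory 'info' family with ROW_INDEX_V1 block encoding, a ROWCOL bloom filter, three versions and 8 KB blocks, plus single-version 'proc', 'rs' and 'state' families with ROW bloom filters and 64 KB blocks. As a hedged sketch of how an equivalent descriptor is assembled with the public client builder API (only the 'info' and 'proc' families are shown for brevity; method names as in the standard HBase client):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Rebuilds part of the descriptor printed by region.MasterRegion(370) above.
public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setBlocksize(8 * 1024)          // 8192 B (8KB) in the log
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)         // 65536 B (64KB) in the log
                .build())
            .build();
    }
}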
2024-11-21T00:28:45,785 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148925784Disabling compacts and flushes for region at 1732148925784Disabling writes for close at 1732148925785 (+1 ms)Writing region close event to WAL at 1732148925785Closed at 1732148925785 2024-11-21T00:28:45,785 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/.initializing 2024-11-21T00:28:45,785 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/WALs/5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:45,786 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:45,787 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C40563%2C1732148924943, suffix=, logDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/WALs/5ed4808ef0e6,40563,1732148924943, archiveDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/oldWALs, maxLogs=10 2024-11-21T00:28:45,804 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/WALs/5ed4808ef0e6,40563,1732148924943/5ed4808ef0e6%2C40563%2C1732148924943.1732148925788, exclude list is [], retry=0 2024-11-21T00:28:45,806 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-823beef4-9d61-40e1-b761-72a26cd5c543,DISK] 2024-11-21T00:28:45,808 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/WALs/5ed4808ef0e6,40563,1732148924943/5ed4808ef0e6%2C40563%2C1732148924943.1732148925788 2024-11-21T00:28:45,808 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43193:43193)] 2024-11-21T00:28:45,808 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:45,809 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:45,809 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,809 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,810 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,811 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:28:45,811 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:45,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:45,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:28:45,814 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:45,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:45,814 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:28:45,817 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:45,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:45,817 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:28:45,819 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:45,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:45,820 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,820 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,820 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,821 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,821 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,822 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:45,822 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:45,824 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:45,824 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66010005, jitterRate=-0.01637427508831024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:45,824 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148925809Initializing all the Stores at 1732148925809Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148925809Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148925810 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148925810Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148925810Cleaning up temporary data from old regions at 1732148925821 (+11 ms)Region opened successfully at 1732148925824 (+3 ms) 2024-11-21T00:28:45,824 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:28:45,826 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@716c78db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:45,827 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:28:45,827 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:28:45,828 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:28:45,828 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:28:45,828 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:28:45,828 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:28:45,828 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:28:45,838 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-21T00:28:45,838 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Unable to get data of znode /01646092936/balancer because node does not exist (not necessarily an error) 2024-11-21T00:28:45,849 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01646092936/balancer already deleted, retry=false 2024-11-21T00:28:45,850 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:28:45,851 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Unable to get data of znode /01646092936/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:28:45,864 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01646092936/normalizer already deleted, retry=false 2024-11-21T00:28:45,864 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:28:45,871 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Unable to get data of znode /01646092936/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:28:45,883 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01646092936/switch/split already deleted, retry=false 2024-11-21T00:28:45,885 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Unable to get data of znode /01646092936/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:28:45,894 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01646092936/switch/merge already deleted, retry=false 2024-11-21T00:28:45,897 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Unable to get data of znode /01646092936/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:28:45,908 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /01646092936/snapshot-cleanup already deleted, retry=false 2024-11-21T00:28:45,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01646092936/running 2024-11-21T00:28:45,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:45,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/01646092936/running 2024-11-21T00:28:45,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:45,924 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,40563,1732148924943, sessionid=0x1015acb039c0000, setting cluster-up flag (Was=false) 2024-11-21T00:28:45,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:45,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:46,007 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01646092936/flush-table-proc/acquired, /01646092936/flush-table-proc/reached, /01646092936/flush-table-proc/abort 2024-11-21T00:28:46,008 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:46,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:46,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:46,133 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /01646092936/online-snapshot/acquired, /01646092936/online-snapshot/reached, /01646092936/online-snapshot/abort 2024-11-21T00:28:46,134 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:46,135 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:28:46,137 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:46,137 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:28:46,137 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of 
multiplier of cost functions = 0.0 etc. 2024-11-21T00:28:46,137 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,40563,1732148924943 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:46,139 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,140 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:46,141 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:28:46,141 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,142 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => 
'0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:28:46,148 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148956148 2024-11-21T00:28:46,149 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:28:46,149 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:28:46,149 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:28:46,149 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:28:46,149 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:28:46,149 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:28:46,149 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:46,150 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:28:46,150 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:28:46,150 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:28:46,150 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:28:46,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:28:46,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:28:46,151 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148926151,5,FailOnTimeoutGroup] 2024-11-21T00:28:46,151 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148926151,5,FailOnTimeoutGroup] 2024-11-21T00:28:46,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:28:46,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,151 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:46,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:28:46,228 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(746): ClusterId : 2769c2a5-6298-47aa-8583-d1e5652dbcb6 2024-11-21T00:28:46,228 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:28:46,239 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:28:46,239 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:28:46,250 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:28:46,250 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11cd5206, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:46,264 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:38737 2024-11-21T00:28:46,264 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:28:46,264 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:28:46,264 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-21T00:28:46,265 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,40563,1732148924943 with port=38737, startcode=1732148925192 2024-11-21T00:28:46,265 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:28:46,266 INFO [HMaster-EventLoopGroup-45-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54723, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:28:46,267 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40563 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,267 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40563 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,268 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd 2024-11-21T00:28:46,268 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38659 2024-11-21T00:28:46,268 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:28:46,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936/rs 2024-11-21T00:28:46,283 DEBUG [RS:0;5ed4808ef0e6:38737 {}] zookeeper.ZKUtil(111): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on existing znode=/01646092936/rs/5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,283 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,38737,1732148925192] 2024-11-21T00:28:46,283 WARN [RS:0;5ed4808ef0e6:38737 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:28:46,283 INFO [RS:0;5ed4808ef0e6:38737 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:46,283 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,285 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:28:46,286 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:28:46,288 INFO [RS:0;5ed4808ef0e6:38737 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:28:46,288 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:46,291 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:28:46,292 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:28:46,292 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:46,292 DEBUG [RS:0;5ed4808ef0e6:38737 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:46,299 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:46,299 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,299 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,299 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,299 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,299 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38737,1732148925192-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:46,313 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:28:46,313 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,38737,1732148925192-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,313 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,313 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.Replication(171): 5ed4808ef0e6,38737,1732148925192 started 2024-11-21T00:28:46,333 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:46,333 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,38737,1732148925192, RpcServer on 5ed4808ef0e6/172.17.0.2:38737, sessionid=0x1015acb039c0001 2024-11-21T00:28:46,333 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:28:46,333 DEBUG [RS:0;5ed4808ef0e6:38737 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,333 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,38737,1732148925192' 2024-11-21T00:28:46,333 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01646092936/flush-table-proc/abort' 2024-11-21T00:28:46,334 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01646092936/flush-table-proc/acquired' 2024-11-21T00:28:46,334 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:28:46,334 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:28:46,334 DEBUG [RS:0;5ed4808ef0e6:38737 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,334 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,38737,1732148925192' 2024-11-21T00:28:46,334 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/01646092936/online-snapshot/abort' 
2024-11-21T00:28:46,335 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/01646092936/online-snapshot/acquired' 2024-11-21T00:28:46,336 DEBUG [RS:0;5ed4808ef0e6:38737 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:28:46,336 INFO [RS:0;5ed4808ef0e6:38737 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:28:46,336 INFO [RS:0;5ed4808ef0e6:38737 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:28:46,436 INFO [RS:0;5ed4808ef0e6:38737 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:46,437 INFO [RS:0;5ed4808ef0e6:38737 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C38737%2C1732148925192, suffix=, logDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192, archiveDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/oldWALs, maxLogs=10 2024-11-21T00:28:46,450 DEBUG [RS:0;5ed4808ef0e6:38737 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438, exclude list is [], retry=0 2024-11-21T00:28:46,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-823beef4-9d61-40e1-b761-72a26cd5c543,DISK] 2024-11-21T00:28:46,454 INFO [RS:0;5ed4808ef0e6:38737 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 2024-11-21T00:28:46,454 DEBUG [RS:0;5ed4808ef0e6:38737 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43193:43193)] 2024-11-21T00:28:46,553 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:28:46,553 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS 
=> '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd 2024-11-21T00:28:46,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:28:46,558 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:46,558 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:46,559 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:46,560 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,560 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:46,561 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:46,561 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,561 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:46,562 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:46,562 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,562 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:46,563 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:46,563 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,563 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,563 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:46,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740 2024-11-21T00:28:46,564 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740 2024-11-21T00:28:46,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:46,565 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:46,565 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:46,566 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:46,567 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:46,568 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64248537, jitterRate=-0.042622193694114685}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:46,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148926558Initializing all the Stores at 1732148926558Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148926558Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148926558Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148926558Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148926558Cleaning up temporary data from old regions at 1732148926565 (+7 ms)Region opened successfully at 1732148926568 (+3 ms) 2024-11-21T00:28:46,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:46,568 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:46,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:46,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:46,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:46,568 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:46,568 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148926568Disabling compacts and flushes for region at 1732148926568Disabling writes for close at 1732148926568Writing region close event to WAL at 1732148926568Closed at 1732148926568 2024-11-21T00:28:46,569 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:46,569 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:28:46,569 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:28:46,570 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:46,571 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:28:46,721 DEBUG [5ed4808ef0e6:40563 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:28:46,722 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,723 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,38737,1732148925192, state=OPENING 2024-11-21T00:28:46,755 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:46,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,756 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,757 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,778 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:28:46,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:46,788 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:28:46,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01646092936/meta-region-server: CHANGED 2024-11-21T00:28:46,789 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01646092936/meta-region-server: CHANGED 2024-11-21T00:28:46,789 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:46,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,38737,1732148925192}] 2024-11-21T00:28:46,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,801 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,802 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:46,837 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:28:46,838 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'test' 2024-11-21T00:28:46,941 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:46,942 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60637, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:28:46,944 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:28:46,944 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:46,944 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:28:46,945 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C38737%2C1732148925192.meta, suffix=.meta, logDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192, archiveDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/oldWALs, maxLogs=10 2024-11-21T00:28:46,957 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.meta.1732148926946.meta, exclude list is [], retry=0 2024-11-21T00:28:46,959 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-823beef4-9d61-40e1-b761-72a26cd5c543,DISK] 2024-11-21T00:28:46,961 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.meta.1732148926946.meta 2024-11-21T00:28:46,961 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43193:43193)] 2024-11-21T00:28:46,961 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:46,961 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:46,961 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:46,962 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:28:46,962 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:28:46,962 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:28:46,962 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:46,962 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:28:46,962 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:28:46,963 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:46,964 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:46,964 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,964 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,964 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:46,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:46,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,965 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:46,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:46,965 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:46,966 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:46,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:46,966 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:46,966 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:46,967 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740 2024-11-21T00:28:46,968 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740 2024-11-21T00:28:46,968 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:46,968 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:46,969 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:28:46,970 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:46,970 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67352590, jitterRate=0.003631800413131714}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:46,970 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:28:46,970 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148926962Writing region info on filesystem at 1732148926962Initializing all the Stores at 1732148926963 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148926963Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148926963Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148926963Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148926963Cleaning up temporary data from old regions at 1732148926968 (+5 ms)Running coprocessor post-open hooks at 1732148926970 (+2 ms)Region opened successfully at 1732148926970 2024-11-21T00:28:46,971 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148926941 2024-11-21T00:28:46,972 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:28:46,972 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:28:46,973 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:46,973 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,38737,1732148925192, state=OPEN 2024-11-21T00:28:47,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01646092936/meta-region-server 2024-11-21T00:28:47,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/01646092936/meta-region-server 2024-11-21T00:28:47,113 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:47,113 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01646092936/meta-region-server: CHANGED 2024-11-21T00:28:47,113 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /01646092936/meta-region-server: CHANGED 2024-11-21T00:28:47,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:28:47,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,38737,1732148925192 in 324 msec 2024-11-21T00:28:47,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:28:47,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 546 msec 2024-11-21T00:28:47,117 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:47,117 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:28:47,118 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:47,118 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38737,1732148925192, seqNum=-1] 2024-11-21T00:28:47,118 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:47,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-46-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48691, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:47,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 987 msec 2024-11-21T00:28:47,123 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148927123, completionTime=-1 2024-11-21T00:28:47,123 INFO 
[master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:28:47,123 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:28:47,124 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:28:47,124 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148987124 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732149047125 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 1 msec 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40563,1732148924943-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40563,1732148924943-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40563,1732148924943-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:40563, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:47,125 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:47,126 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:28:47,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.857sec 2024-11-21T00:28:47,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:28:47,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:28:47,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:28:47,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:28:47,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:28:47,127 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40563,1732148924943-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:47,128 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40563,1732148924943-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:28:47,128 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e107ad6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:47,128 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,40563,-1 for getting cluster id 2024-11-21T00:28:47,129 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:47,129 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:28:47,129 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:28:47,129 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40563,1732148924943-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:47,130 DEBUG [HMaster-EventLoopGroup-45-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2769c2a5-6298-47aa-8583-d1e5652dbcb6' 2024-11-21T00:28:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2769c2a5-6298-47aa-8583-d1e5652dbcb6" 2024-11-21T00:28:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52be5c29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,40563,-1] 2024-11-21T00:28:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:47,130 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:47,131 INFO [HMaster-EventLoopGroup-45-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56902, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:47,131 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f56406f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:47,132 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:47,132 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38737,1732148925192, seqNum=-1] 2024-11-21T00:28:47,132 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:47,133 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-46-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44346, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:47,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:47,135 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster0 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:47,157 WARN [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 5, running: 1 2024-11-21T00:28:47,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster00x0, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:47,157 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster0-0x1015acb039c0002 connected 2024-11-21T00:28:47,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.log.dir so I do NOT create it in target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1 2024-11-21T00:28:47,170 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:28:47,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.tmp.dir so I do NOT create it in target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1 2024-11-21T00:28:47,170 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.tmp.dir Erasing configuration value by system value. 
2024-11-21T00:28:47,170 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1 2024-11-21T00:28:47,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:28:47,170 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/cluster_db71360f-4ddc-991f-5d5b-6c51c7db50b9, deleteOnExit=true 2024-11-21T00:28:47,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/test.cache.data in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:28:47,171 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:28:47,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:28:47,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:28:47,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:47,675 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:47,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:47,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:47,691 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:28:47,691 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:47,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7d073492{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:47,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@455d2d73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:47,808 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ec68835{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/java.io.tmpdir/jetty-localhost-39419-hadoop-hdfs-3_4_1-tests_jar-_-any-18244549908142934711/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:47,808 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ff310ca{HTTP/1.1, (http/1.1)}{localhost:39419} 2024-11-21T00:28:47,808 INFO [Time-limited test {}] server.Server(415): Started @662358ms 2024-11-21T00:28:48,081 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:48,086 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:48,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:48,087 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:48,087 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:28:48,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@679c947f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:48,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@156f5b83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:48,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@483ea1e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/java.io.tmpdir/jetty-localhost-40535-hadoop-hdfs-3_4_1-tests_jar-_-any-3055403591307989872/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:48,194 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6099d95f{HTTP/1.1, (http/1.1)}{localhost:40535} 2024-11-21T00:28:48,194 INFO [Time-limited test {}] server.Server(415): Started @662743ms 2024-11-21T00:28:48,195 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:28:49,134 WARN [Thread-4480 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/cluster_db71360f-4ddc-991f-5d5b-6c51c7db50b9/data/data1/current/BP-1901245850-172.17.0.2-1732148927192/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:49,134 WARN [Thread-4481 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/cluster_db71360f-4ddc-991f-5d5b-6c51c7db50b9/data/data2/current/BP-1901245850-172.17.0.2-1732148927192/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:49,155 WARN [Thread-4468 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:28:49,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7db7672ea8080be with lease ID 0x25433210072d93d2: Processing first storage report for DS-3dffb2e2-d436-4733-8558-e3ed542bc85e from datanode DatanodeRegistration(127.0.0.1:38623, datanodeUuid=ec6de4a0-9f6c-4551-96a6-49bf945103ff, infoPort=44609, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=990280687;c=1732148927192) 2024-11-21T00:28:49,158 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7db7672ea8080be with lease ID 0x25433210072d93d2: from storage DS-3dffb2e2-d436-4733-8558-e3ed542bc85e node DatanodeRegistration(127.0.0.1:38623, datanodeUuid=ec6de4a0-9f6c-4551-96a6-49bf945103ff, infoPort=44609, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=990280687;c=1732148927192), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:49,158 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb7db7672ea8080be with lease ID 0x25433210072d93d2: Processing first storage report for DS-e07658aa-3428-46cb-8edf-ed3ffdab736c from datanode DatanodeRegistration(127.0.0.1:38623, datanodeUuid=ec6de4a0-9f6c-4551-96a6-49bf945103ff, infoPort=44609, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=990280687;c=1732148927192) 2024-11-21T00:28:49,158 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb7db7672ea8080be with lease ID 0x25433210072d93d2: from storage DS-e07658aa-3428-46cb-8edf-ed3ffdab736c node DatanodeRegistration(127.0.0.1:38623, datanodeUuid=ec6de4a0-9f6c-4551-96a6-49bf945103ff, infoPort=44609, infoSecurePort=0, ipcPort=39371, storageInfo=lv=-57;cid=testClusterID;nsid=990280687;c=1732148927192), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:49,244 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1 
2024-11-21T00:28:49,244 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:49,246 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:49,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:28:49,252 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314 with version=8 2024-11-21T00:28:49,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/hbase-staging 2024-11-21T00:28:49,254 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:49,254 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:49,254 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:49,254 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:49,254 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:49,254 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:49,254 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:28:49,254 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:49,255 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37585 2024-11-21T00:28:49,255 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37585 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:49,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:375850x0, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:49,306 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37585-0x1015acb039c0003 connected 2024-11-21T00:28:49,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-21T00:28:49,390 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:49,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on znode that does not yet exist, /1702889742/running 2024-11-21T00:28:49,391 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314, hbase.cluster.distributed=false 2024-11-21T00:28:49,392 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on znode that does not yet exist, /1702889742/acl 2024-11-21T00:28:49,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37585 2024-11-21T00:28:49,392 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37585 2024-11-21T00:28:49,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37585 2024-11-21T00:28:49,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37585 2024-11-21T00:28:49,393 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37585 2024-11-21T00:28:49,411 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:49,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:49,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:49,412 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:49,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:49,412 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:49,413 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:28:49,413 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:49,420 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40747 2024-11-21T00:28:49,421 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:40747 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:49,422 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:49,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:49,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:407470x0, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:49,453 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:407470x0, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on znode that does not yet exist, /1702889742/running 2024-11-21T00:28:49,454 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:28:49,455 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40747-0x1015acb039c0004 connected 2024-11-21T00:28:49,456 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:28:49,456 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on znode that does not yet exist, /1702889742/master 2024-11-21T00:28:49,457 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on znode that does not yet exist, /1702889742/acl 2024-11-21T00:28:49,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40747 2024-11-21T00:28:49,467 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40747 2024-11-21T00:28:49,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40747 2024-11-21T00:28:49,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40747 2024-11-21T00:28:49,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40747 2024-11-21T00:28:49,505 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:37585 2024-11-21T00:28:49,505 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /1702889742/backup-masters/5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:49,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742/backup-masters 2024-11-21T00:28:49,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/1702889742/backup-masters 2024-11-21T00:28:49,515 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on existing znode=/1702889742/backup-masters/5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:49,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:49,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1702889742/master 2024-11-21T00:28:49,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:49,526 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on existing znode=/1702889742/master 2024-11-21T00:28:49,526 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /1702889742/backup-masters/5ed4808ef0e6,37585,1732148929254 from backup master directory 2024-11-21T00:28:49,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1702889742/backup-masters/5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:49,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742/backup-masters 2024-11-21T00:28:49,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742/backup-masters 2024-11-21T00:28:49,536 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-21T00:28:49,536 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:49,540 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/hbase.id] with ID: d3f68a05-0b79-44d8-b908-8e41661cca8a 2024-11-21T00:28:49,540 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/.tmp/hbase.id 2024-11-21T00:28:49,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:28:49,563 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/.tmp/hbase.id]:[hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/hbase.id] 2024-11-21T00:28:49,573 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:49,573 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:28:49,574 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:28:49,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:49,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:49,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:28:49,618 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:49,619 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:28:49,619 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:49,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:28:50,038 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store 2024-11-21T00:28:50,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:28:50,144 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
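(Aside) The FSUtils entries further up (around 00:28:49,540-563) write the cluster ID to .tmp/hbase.id first and only then move it onto hbase.id. A minimal sketch of that write-then-rename idiom with the plain Hadoop FileSystem API; the NameNode URI comes from the log, while the paths and file content here are hypothetical.

    import java.net.URI;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // hdfs://localhost:46101 is the mini-DFS from the log; any FileSystem URI works here.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46101"), conf);

        Path tmp = new Path("/user/jenkins/demo/.tmp/hbase.id");   // hypothetical paths
        Path dst = new Path("/user/jenkins/demo/hbase.id");

        // Write the full content to a temporary file first ...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("d3f68a05-0b79-44d8-b908-8e41661cca8a".getBytes(StandardCharsets.UTF_8));
        }
        // ... then publish it by renaming into place, so readers never see a partial file.
        fs.mkdirs(dst.getParent());
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
        System.out.println("cluster id file at " + dst);
      }
    }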
2024-11-21T00:28:50,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:50,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:50,443 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:50,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:50,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:50,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:50,443 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:50,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148930443Disabling compacts and flushes for region at 1732148930443Disabling writes for close at 1732148930443Writing region close event to WAL at 1732148930443Closed at 1732148930443 2024-11-21T00:28:50,443 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/.initializing 2024-11-21T00:28:50,443 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/WALs/5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:50,444 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:50,445 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C37585%2C1732148929254, suffix=, logDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/WALs/5ed4808ef0e6,37585,1732148929254, archiveDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/oldWALs, maxLogs=10 2024-11-21T00:28:50,457 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/WALs/5ed4808ef0e6,37585,1732148929254/5ed4808ef0e6%2C37585%2C1732148929254.1732148930445, exclude list is [], retry=0 2024-11-21T00:28:50,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-48-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38623,DS-3dffb2e2-d436-4733-8558-e3ed542bc85e,DISK] 
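(Aside) The AbstractFSWAL line above reports blocksize=256 MB and rollsize=128 MB for the master-local-region WAL, with AsyncFSWALProvider as the provider. A hedged sketch of the configuration keys that commonly feed those values (roll size being block size times a multiplier); the key names are from memory and worth verifying against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Use the async fan-out WAL, matching the AsyncFSWALProvider line in the log.
        conf.set("hbase.wal.provider", "asyncfs");
        // WAL file block size in bytes; the log shows 256 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll when the file reaches blocksize * multiplier (256 MB * 0.5 = the 128 MB rollsize).
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on WAL files per server (the maxLogs=10 in the log line).
        conf.setInt("hbase.regionserver.maxlogs", 10);
        System.out.println(conf.get("hbase.wal.provider"));
      }
    }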
2024-11-21T00:28:50,463 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/WALs/5ed4808ef0e6,37585,1732148929254/5ed4808ef0e6%2C37585%2C1732148929254.1732148930445 2024-11-21T00:28:50,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44609:44609)] 2024-11-21T00:28:50,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:50,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:50,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,464 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:28:50,469 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:50,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:50,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-21T00:28:50,470 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:50,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:50,470 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:28:50,471 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:50,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:50,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:28:50,472 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:50,472 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:50,472 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,473 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,473 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,473 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,474 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,474 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
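(Aside) The CompactionConfiguration lines above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0) are all configuration-driven. A hedged sketch of the usual keys; names and defaults should be double-checked against the running HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
        conf.setInt("hbase.hstore.compaction.min", 3);    // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);   // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        System.out.println("compaction ratio=" + conf.get("hbase.hstore.compaction.ratio"));
      }
    }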
2024-11-21T00:28:50,474 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:50,476 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:50,476 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70118767, jitterRate=0.04485104978084564}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:50,477 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148930464Initializing all the Stores at 1732148930465 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148930465Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148930467 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148930467Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148930467Cleaning up temporary data from old regions at 1732148930474 (+7 ms)Region opened successfully at 1732148930476 (+2 ms) 2024-11-21T00:28:50,477 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:28:50,479 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20b93d5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:50,480 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
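(Aside) The "Opened 1595e783b53d99cd5eef43b6debb2682" line above prints the effective split-policy chain: SteppingSplitPolicy over IncreasingToUpperBound over ConstantSize, with a jittered desiredMaxFileSize. A hedged sketch of the cluster-wide knobs that select the policy class and its size threshold; per-table overrides through the table descriptor are also possible but not shown here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SplitPolicyConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Which RegionSplitPolicy implementation to use (SteppingSplitPolicy in the log above).
        conf.set("hbase.regionserver.region.split.policy",
            "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        // Base size threshold that ConstantSizeRegionSplitPolicy jitters around (64 MB here).
        conf.setLong("hbase.hregion.max.filesize", 64L * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.region.split.policy"));
      }
    }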
2024-11-21T00:28:50,480 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:28:50,480 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:28:50,480 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:28:50,480 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:28:50,481 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:28:50,481 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:28:50,482 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-21T00:28:50,483 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Unable to get data of znode /1702889742/balancer because node does not exist (not necessarily an error) 2024-11-21T00:28:50,548 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1702889742/balancer already deleted, retry=false 2024-11-21T00:28:50,549 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:28:50,549 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Unable to get data of znode /1702889742/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:28:50,578 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1702889742/normalizer already deleted, retry=false 2024-11-21T00:28:50,578 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:28:50,579 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Unable to get data of znode /1702889742/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:28:50,588 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1702889742/switch/split already deleted, retry=false 2024-11-21T00:28:50,589 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Unable to get data of znode /1702889742/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:28:50,599 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1702889742/switch/merge already deleted, retry=false 2024-11-21T00:28:50,600 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): 
master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Unable to get data of znode /1702889742/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:28:50,609 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /1702889742/snapshot-cleanup already deleted, retry=false 2024-11-21T00:28:50,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1702889742/running 2024-11-21T00:28:50,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/1702889742/running 2024-11-21T00:28:50,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:50,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:50,621 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,37585,1732148929254, sessionid=0x1015acb039c0003, setting cluster-up flag (Was=false) 2024-11-21T00:28:50,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:50,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:50,684 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1702889742/flush-table-proc/acquired, /1702889742/flush-table-proc/reached, /1702889742/flush-table-proc/abort 2024-11-21T00:28:50,696 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:50,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:50,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:50,893 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /1702889742/online-snapshot/acquired, /1702889742/online-snapshot/reached, /1702889742/online-snapshot/abort 2024-11-21T00:28:50,894 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,37585,1732148929254 
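(Aside) The RecoverableZooKeeper lines above ("Node ... already deleted, retry=false") and the ZKProcedureUtil "Clearing all znodes" step are delete-if-present operations. A small sketch of that pattern with the plain ZooKeeper client; the connect string and path are placeholders lifted from the log.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class DeleteIfPresentSketch {
      // Delete a znode, treating "it was never there" the same as "deleted".
      static void deleteIfPresent(ZooKeeper zk, String path)
          throws KeeperException, InterruptedException {
        try {
          zk.delete(path, -1); // version -1 = delete regardless of version
        } catch (KeeperException.NoNodeException e) {
          // Same outcome the log reports as "already deleted, retry=false": nothing to do.
        }
      }

      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62972", 30_000, e -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) connected.countDown();
        });
        connected.await();
        deleteIfPresent(zk, "/1702889742/flush-table-proc/abort"); // placeholder path from the log
        zk.close();
      }
    }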
2024-11-21T00:28:50,895 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:28:50,896 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:50,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:28:50,897 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:28:50,897 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,37585,1732148929254 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:50,898 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] 
procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148960899 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:28:50,899 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,899 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:50,900 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:28:50,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:28:50,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:28:50,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:28:50,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:28:50,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:28:50,900 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:28:50,900 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148930900,5,FailOnTimeoutGroup] 2024-11-21T00:28:50,900 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148930900,5,FailOnTimeoutGroup] 2024-11-21T00:28:50,901 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
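(Aside) The CleanerChore lines above list the cleaner delegates the master loads for old WALs and archived HFiles. Those chains are plugin lists in configuration; a hedged sketch of the usual keys follows, with the class lists abbreviated to two entries each (class names are copied from the log, the key names are from memory).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerPluginsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Delegates consulted before an old WAL in oldWALs may be deleted.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
                + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
        // Delegates consulted before an archived HFile may be deleted.
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
                + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
        // Minimum age, in milliseconds, before TimeToLiveLogCleaner releases a WAL.
        conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        System.out.println(conf.get("hbase.master.logcleaner.plugins"));
      }
    }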
2024-11-21T00:28:50,901 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:50,901 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:28:50,901 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,901 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,901 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:28:50,901 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(746): ClusterId : d3f68a05-0b79-44d8-b908-8e41661cca8a 2024-11-21T00:28:50,901 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:28:50,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:28:50,912 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:28:50,912 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314 2024-11-21T00:28:50,912 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:28:50,912 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:28:50,924 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:28:50,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741832_1008 (size=32) 2024-11-21T00:28:50,924 DEBUG [RS:0;5ed4808ef0e6:40747 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a7824d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:50,937 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:40747 2024-11-21T00:28:50,937 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:28:50,937 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:28:50,937 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(832): About to register with Master. 
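(Aside) The table-descriptor dumps above spell out per-family attributes such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE. A hedged sketch of how such a descriptor is assembled with the public client API, mirroring the 'info' family attributes from the log; the table name is a placeholder, and this is illustrative rather than how the master builds hbase:meta internally.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' family printed in the log: 3 versions, ROWCOL bloom,
        // ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))   // hypothetical table, not hbase:meta
            .setColumnFamily(info)
            .build();

        System.out.println(td);
      }
    }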
2024-11-21T00:28:50,937 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,37585,1732148929254 with port=40747, startcode=1732148929411 2024-11-21T00:28:50,938 DEBUG [RS:0;5ed4808ef0e6:40747 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:28:50,940 INFO [HMaster-EventLoopGroup-47-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46839, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.22 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:28:50,941 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37585 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:50,941 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37585 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:50,942 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314 2024-11-21T00:28:50,942 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46101 2024-11-21T00:28:50,942 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:28:50,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742/rs 2024-11-21T00:28:50,954 DEBUG [RS:0;5ed4808ef0e6:40747 {}] zookeeper.ZKUtil(111): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on existing znode=/1702889742/rs/5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:50,954 WARN [RS:0;5ed4808ef0e6:40747 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:28:50,954 INFO [RS:0;5ed4808ef0e6:40747 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:50,954 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/WALs/5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:50,954 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,40747,1732148929411] 2024-11-21T00:28:50,957 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:28:50,958 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:28:50,959 INFO [RS:0;5ed4808ef0e6:40747 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:28:50,959 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
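(Aside) The MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M) derives both numbers from heap-fraction settings, with 836/880 matching the default 0.95 lower-limit factor. A hedged sketch of those keys; names are from memory and worth confirming for the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Upper bound for all memstores combined, as a fraction of the RS heap (default 0.4).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Forced flushing backs off once usage drops below limit * lower.limit (default 0.95),
        // i.e. the 836 M low mark relative to the 880 M limit in the log.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }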
2024-11-21T00:28:50,960 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:28:50,961 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:28:50,961 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:50,961 DEBUG [RS:0;5ed4808ef0e6:40747 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:50,962 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
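(Aside) The ChoreService lines above and below register periodic background tasks (CompactionChecker every second, MemstoreFlusherChore every second, the cleaner and statistics chores on longer periods). As a plain-JDK analogy for that pattern, and explicitly not the HBase ChoreService API itself, a fixed-rate task looks like this:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class PeriodicChoreAnalogy {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        // Roughly what "ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS"
        // amounts to: a named task run at a fixed period until the service is stopped.
        pool.scheduleAtFixedRate(
            () -> System.out.println("compaction check tick"),
            0, 1000, TimeUnit.MILLISECONDS);

        TimeUnit.SECONDS.sleep(3);   // let a few ticks run
        pool.shutdownNow();          // analogous to cancelling the chore on shutdown
      }
    }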
2024-11-21T00:28:50,962 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,962 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,962 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,962 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,962 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40747,1732148929411-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:50,982 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:28:50,982 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,40747,1732148929411-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,982 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:50,982 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.Replication(171): 5ed4808ef0e6,40747,1732148929411 started 2024-11-21T00:28:51,002 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:51,002 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,40747,1732148929411, RpcServer on 5ed4808ef0e6/172.17.0.2:40747, sessionid=0x1015acb039c0004 2024-11-21T00:28:51,002 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:28:51,002 DEBUG [RS:0;5ed4808ef0e6:40747 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:51,002 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,40747,1732148929411' 2024-11-21T00:28:51,002 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1702889742/flush-table-proc/abort' 2024-11-21T00:28:51,004 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1702889742/flush-table-proc/acquired' 2024-11-21T00:28:51,005 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:28:51,005 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:28:51,005 DEBUG [RS:0;5ed4808ef0e6:40747 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:51,005 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,40747,1732148929411' 2024-11-21T00:28:51,005 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/1702889742/online-snapshot/abort' 
2024-11-21T00:28:51,005 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/1702889742/online-snapshot/acquired' 2024-11-21T00:28:51,005 DEBUG [RS:0;5ed4808ef0e6:40747 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:28:51,005 INFO [RS:0;5ed4808ef0e6:40747 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:28:51,005 INFO [RS:0;5ed4808ef0e6:40747 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:28:51,106 INFO [RS:0;5ed4808ef0e6:40747 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:51,108 INFO [RS:0;5ed4808ef0e6:40747 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C40747%2C1732148929411, suffix=, logDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/WALs/5ed4808ef0e6,40747,1732148929411, archiveDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/oldWALs, maxLogs=10 2024-11-21T00:28:51,125 DEBUG [RS:0;5ed4808ef0e6:40747 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/WALs/5ed4808ef0e6,40747,1732148929411/5ed4808ef0e6%2C40747%2C1732148929411.1732148931108, exclude list is [], retry=0 2024-11-21T00:28:51,129 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-48-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38623,DS-3dffb2e2-d436-4733-8558-e3ed542bc85e,DISK] 2024-11-21T00:28:51,138 INFO [RS:0;5ed4808ef0e6:40747 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/WALs/5ed4808ef0e6,40747,1732148929411/5ed4808ef0e6%2C40747%2C1732148929411.1732148931108 2024-11-21T00:28:51,180 DEBUG [RS:0;5ed4808ef0e6:40747 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44609:44609)] 2024-11-21T00:28:51,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:51,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:51,357 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:51,357 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:51,361 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:51,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:51,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:51,378 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:51,387 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:51,388 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, 
compression=NONE 2024-11-21T00:28:51,391 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:51,401 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:51,401 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:51,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:51,411 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740 2024-11-21T00:28:51,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740 2024-11-21T00:28:51,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:51,414 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:51,414 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:28:51,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:51,428 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:51,430 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63956382, jitterRate=-0.04697564244270325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:51,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148931330Initializing all the Stores at 1732148931336 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148931336Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148931340 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148931340Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148931340Cleaning up temporary data from old regions at 1732148931414 (+74 ms)Region opened successfully at 1732148931430 (+16 ms) 2024-11-21T00:28:51,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:51,430 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:51,430 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:51,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:51,431 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:51,432 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:51,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148931430Disabling compacts and flushes for region at 1732148931430Disabling writes for close at 1732148931431 (+1 
ms)Writing region close event to WAL at 1732148931432 (+1 ms)Closed at 1732148931432 2024-11-21T00:28:51,437 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:51,437 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:28:51,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:28:51,440 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:51,447 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:28:51,599 DEBUG [5ed4808ef0e6:37585 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:28:51,600 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:51,603 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,40747,1732148929411, state=OPENING 2024-11-21T00:28:51,697 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:28:51,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:51,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:28:51,715 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1702889742/meta-region-server: CHANGED 2024-11-21T00:28:51,716 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1702889742/meta-region-server: CHANGED 2024-11-21T00:28:51,717 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:51,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,40747,1732148929411}] 2024-11-21T00:28:51,872 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:51,880 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-48-3 {}] ipc.ServerRpcConnection(484): Connection 
from 172.17.0.2:33013, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:28:51,896 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:28:51,896 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:51,897 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:28:51,909 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C40747%2C1732148929411.meta, suffix=.meta, logDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/WALs/5ed4808ef0e6,40747,1732148929411, archiveDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/oldWALs, maxLogs=10 2024-11-21T00:28:51,930 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/WALs/5ed4808ef0e6,40747,1732148929411/5ed4808ef0e6%2C40747%2C1732148929411.meta.1732148931909.meta, exclude list is [], retry=0 2024-11-21T00:28:51,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-48-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38623,DS-3dffb2e2-d436-4733-8558-e3ed542bc85e,DISK] 2024-11-21T00:28:51,947 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/WALs/5ed4808ef0e6,40747,1732148929411/5ed4808ef0e6%2C40747%2C1732148929411.meta.1732148931909.meta 2024-11-21T00:28:51,949 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44609:44609)] 2024-11-21T00:28:51,949 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:51,949 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
2024-11-21T00:28:51,949 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:51,950 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:28:51,950 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:28:51,950 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:28:51,950 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:51,950 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:28:51,950 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:28:51,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:51,978 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:51,978 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:51,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:51,980 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:51,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:51,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:51,987 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:51,987 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:51,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:51,997 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:51,997 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:51,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:51,998 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:51,999 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740 2024-11-21T00:28:52,006 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740 2024-11-21T00:28:52,011 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:52,011 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:52,012 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:28:52,016 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:52,017 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72191921, jitterRate=0.07574345171451569}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:52,017 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:28:52,017 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148931950Writing region info on filesystem at 1732148931950Initializing all the Stores at 1732148931959 (+9 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148931959Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148931964 (+5 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148931964Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148931964Cleaning up temporary data from old regions at 1732148932011 (+47 ms)Running coprocessor post-open hooks at 1732148932017 (+6 ms)Region opened successfully at 1732148932017 2024-11-21T00:28:52,024 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148931871 2024-11-21T00:28:52,028 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:28:52,028 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:28:52,029 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:52,029 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,40747,1732148929411, state=OPEN 2024-11-21T00:28:52,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1702889742/meta-region-server 2024-11-21T00:28:52,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/1702889742/meta-region-server 2024-11-21T00:28:52,101 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:52,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1702889742/meta-region-server: CHANGED 2024-11-21T00:28:52,101 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /1702889742/meta-region-server: CHANGED 2024-11-21T00:28:52,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:28:52,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,40747,1732148929411 in 384 msec 2024-11-21T00:28:52,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:28:52,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 666 msec 2024-11-21T00:28:52,105 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:52,105 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:28:52,106 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:52,107 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,40747,1732148929411, seqNum=-1] 2024-11-21T00:28:52,107 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:52,108 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-48-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43433, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:52,111 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2150 sec 2024-11-21T00:28:52,112 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148932111, completionTime=-1 
2024-11-21T00:28:52,112 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:28:52,112 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148992117 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732149052117 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 5 msec 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37585,1732148929254-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37585,1732148929254-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37585,1732148929254-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:37585, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:52,117 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:52,118 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:52,119 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.584sec 2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37585,1732148929254-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:52,120 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37585,1732148929254-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:28:52,126 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:28:52,126 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:28:52,126 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37585,1732148929254-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:52,207 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e55d38c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:52,207 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37585,-1 for getting cluster id 2024-11-21T00:28:52,207 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:52,208 DEBUG [HMaster-EventLoopGroup-47-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3f68a05-0b79-44d8-b908-8e41661cca8a' 2024-11-21T00:28:52,208 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:52,208 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3f68a05-0b79-44d8-b908-8e41661cca8a" 2024-11-21T00:28:52,208 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88cdce0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:52,208 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37585,-1] 2024-11-21T00:28:52,209 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:52,209 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:52,209 INFO [HMaster-EventLoopGroup-47-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48522, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:52,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7193b1a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:52,210 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:52,211 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,40747,1732148929411, seqNum=-1] 2024-11-21T00:28:52,211 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:52,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-48-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39536, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:52,213 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:52,214 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster1 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:52,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster10x0, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:52,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster1-0x1015acb039c0005 connected 2024-11-21T00:28:52,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.log.dir so I do NOT create it in target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e 2024-11-21T00:28:52,243 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.log.dir property value differs in configuration and system: Configuration=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/../logs while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.log.dir Erasing configuration value by system value. 2024-11-21T00:28:52,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.tmp.dir so I do NOT create it in target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e 2024-11-21T00:28:52,243 WARN [Time-limited test {}] hbase.HBaseTestingUtil(405): hadoop.tmp.dir property value differs in configuration and system: Configuration=/tmp/hadoop-jenkins while System=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.tmp.dir Erasing configuration value by system value. 
2024-11-21T00:28:52,243 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e 2024-11-21T00:28:52,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-21T00:28:52,243 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/cluster_492b2821-a719-7498-3548-b01a9298b34c, deleteOnExit=true 2024-11-21T00:28:52,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/test.cache.data in system properties and HBase conf 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/hadoop.tmp.dir in system properties and HBase conf 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/hadoop.log.dir in system properties and HBase conf 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-21T00:28:52,244 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:52,244 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/nfs.dump.dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/java.io.tmpdir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-21T00:28:52,245 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-21T00:28:52,465 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:52,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,466 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,467 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,510 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,511 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,512 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,524 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:52,984 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:28:53,022 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:53,033 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:53,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:53,073 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:53,073 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-21T00:28:53,074 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:53,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50aaefd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:53,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@558fb9db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:53,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bdd2661{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/java.io.tmpdir/jetty-localhost-44421-hadoop-hdfs-3_4_1-tests_jar-_-any-4835781683335870792/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:28:53,232 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3613c72b{HTTP/1.1, (http/1.1)}{localhost:44421} 2024-11-21T00:28:53,232 INFO [Time-limited test {}] server.Server(415): Started @667782ms 2024-11-21T00:28:53,623 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-21T00:28:53,625 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-21T00:28:53,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-21T00:28:53,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-21T00:28:53,632 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-21T00:28:53,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44f669d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/hadoop.log.dir/,AVAILABLE} 2024-11-21T00:28:53,634 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bb67c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-21T00:28:53,750 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52fd1809{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/java.io.tmpdir/jetty-localhost-37109-hadoop-hdfs-3_4_1-tests_jar-_-any-5476660361086279840/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:28:53,750 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@3026cbe4{HTTP/1.1, (http/1.1)}{localhost:37109} 2024-11-21T00:28:53,751 INFO [Time-limited test {}] server.Server(415): Started @668300ms 2024-11-21T00:28:53,752 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-21T00:28:54,425 WARN [Thread-4602 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/cluster_492b2821-a719-7498-3548-b01a9298b34c/data/data1/current/BP-2040318916-172.17.0.2-1732148932267/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:54,425 WARN [Thread-4603 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/cluster_492b2821-a719-7498-3548-b01a9298b34c/data/data2/current/BP-2040318916-172.17.0.2-1732148932267/current, will proceed with Du for space computation calculation, 2024-11-21T00:28:54,457 WARN [Thread-4590 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-21T00:28:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1389824033c48c7 with lease ID 0xbd8ea2986cdf6ba0: Processing first storage report for DS-274b5745-a7ed-469f-82cd-e2d6cecdf978 from datanode DatanodeRegistration(127.0.0.1:46487, datanodeUuid=061167e0-d276-436f-8806-542de1d2fa7b, infoPort=45675, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1585808618;c=1732148932267) 2024-11-21T00:28:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1389824033c48c7 with lease ID 0xbd8ea2986cdf6ba0: from storage DS-274b5745-a7ed-469f-82cd-e2d6cecdf978 node DatanodeRegistration(127.0.0.1:46487, datanodeUuid=061167e0-d276-436f-8806-542de1d2fa7b, infoPort=45675, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1585808618;c=1732148932267), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1389824033c48c7 with lease ID 0xbd8ea2986cdf6ba0: Processing first storage report for DS-27b06058-7b9c-43da-b928-d35f8d6e39f1 from datanode DatanodeRegistration(127.0.0.1:46487, datanodeUuid=061167e0-d276-436f-8806-542de1d2fa7b, infoPort=45675, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1585808618;c=1732148932267) 2024-11-21T00:28:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1389824033c48c7 with lease ID 0xbd8ea2986cdf6ba0: from storage DS-27b06058-7b9c-43da-b928-d35f8d6e39f1 node DatanodeRegistration(127.0.0.1:46487, datanodeUuid=061167e0-d276-436f-8806-542de1d2fa7b, infoPort=45675, infoSecurePort=0, ipcPort=33021, storageInfo=lv=-57;cid=testClusterID;nsid=1585808618;c=1732148932267), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-21T00:28:54,524 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e 
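The HBaseTestingUtil line just above is where the test harness points hbase.rootdir at the freshly started mini-HDFS. For orientation, a minimal sketch of driving the same harness from a test of one's own, assuming HBaseTestingUtil (HBase 3.x) keeps the startMiniCluster/shutdownMiniCluster/createTable API of the older HBaseTestingUtility; the table name "demo" and family "cf" are illustrative only:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Brings up in-process ZooKeeper, HDFS (NameNode + DataNode) and HBase,
        // which is what produces the jetty/datanode/master lines in this section.
        util.startMiniCluster();
        try {
          Table t = util.createTable(TableName.valueOf("demo"), Bytes.toBytes("cf"));
          System.out.println("created " + t.getName());
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }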
2024-11-21T00:28:54,524 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:54,526 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:54,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741825_1001 (size=7) 2024-11-21T00:28:54,943 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88 with version=8 2024-11-21T00:28:54,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/hbase-staging 2024-11-21T00:28:54,945 INFO [Time-limited test {}] client.ConnectionUtils(128): master/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:54,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:54,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:54,946 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:54,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:54,946 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:54,946 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-21T00:28:54,946 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:54,946 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37889 2024-11-21T00:28:54,947 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37889 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:55,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:378890x0, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:55,010 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37889-0x1015acb039c0006 connected 2024-11-21T00:28:55,083 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
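The RpcExecutor lines above (handlerCount=3, numCallQueues=1, maxQueueLength=30) reflect the deliberately small handler pools the test runs with. A hedged sketch of the configuration keys that normally drive these numbers in a real deployment; the key names are standard HBase properties, but whether this test sets them explicitly or inherits defaults is not shown in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Total RPC handler threads per server (this test runs with only 3;
        // maxQueueLength=30 appears to be the default 10 queued calls per handler).
        conf.setInt("hbase.regionserver.handler.count", 30);
        // Fraction of handlers that get their own call queue (0 = one shared FIFO queue).
        conf.setFloat("hbase.ipc.server.callqueue.handler.factor", 0.1f);
        // Split call queues into read/write pools, as in the
        // "priority.RWQ.Fifo writeQueues=1 ... readQueues=1" line above.
        conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f);
        conf.setFloat("hbase.ipc.server.callqueue.scan.ratio", 0.0f);
        System.out.println(conf.getInt("hbase.regionserver.handler.count", -1));
      }
    }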
2024-11-21T00:28:55,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:55,087 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on znode that does not yet exist, /2-1051660059/running 2024-11-21T00:28:55,087 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88, hbase.cluster.distributed=false 2024-11-21T00:28:55,088 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on znode that does not yet exist, /2-1051660059/acl 2024-11-21T00:28:55,089 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37889 2024-11-21T00:28:55,089 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37889 2024-11-21T00:28:55,089 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37889 2024-11-21T00:28:55,089 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37889 2024-11-21T00:28:55,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37889 2024-11-21T00:28:55,107 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/5ed4808ef0e6:0 server-side Connection retries=45 2024-11-21T00:28:55,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:55,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:55,107 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-21T00:28:55,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-21T00:28:55,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-21T00:28:55,108 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-21T00:28:55,108 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-21T00:28:55,108 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35845 2024-11-21T00:28:55,109 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process 
identifier=regionserver:35845 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:55,109 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:55,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:55,132 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:358450x0, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:55,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:358450x0, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on znode that does not yet exist, /2-1051660059/running 2024-11-21T00:28:55,133 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35845-0x1015acb039c0007 connected 2024-11-21T00:28:55,133 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-21T00:28:55,135 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-21T00:28:55,135 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on znode that does not yet exist, /2-1051660059/master 2024-11-21T00:28:55,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on znode that does not yet exist, /2-1051660059/acl 2024-11-21T00:28:55,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35845 2024-11-21T00:28:55,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35845 2024-11-21T00:28:55,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35845 2024-11-21T00:28:55,228 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35845 2024-11-21T00:28:55,232 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35845 2024-11-21T00:28:55,246 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5ed4808ef0e6:37889 2024-11-21T00:28:55,246 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /2-1051660059/backup-masters/5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:55,453 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059/backup-masters 2024-11-21T00:28:55,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/2-1051660059/backup-masters 2024-11-21T00:28:55,453 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on existing znode=/2-1051660059/backup-masters/5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:55,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:55,642 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/2-1051660059/master 2024-11-21T00:28:55,642 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:55,642 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on existing znode=/2-1051660059/master 2024-11-21T00:28:55,648 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /2-1051660059/backup-masters/5ed4808ef0e6,37889,1732148934945 from backup master directory 2024-11-21T00:28:55,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-1051660059/backup-masters/5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:55,672 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059/backup-masters 2024-11-21T00:28:55,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059/backup-masters 2024-11-21T00:28:55,673 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
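A few records back the region server allocated its caches ("Allocating BlockCache size=880 MB, blockSize=64 KB" and "MobFileCache enabled with cacheSize=1000"). A hedged sketch of the sizing knobs involved; the key names are assumed standard properties, and the 880 MB figure is consistent with the default 0.4 heap fraction on a roughly 2.2 GB test JVM heap, though the actual heap size is not shown in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CacheSizingSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap reserved for the on-heap block cache.
        conf.setFloat("hfile.block.cache.size", 0.4f);
        // Number of cached open MOB file readers (the log shows cacheSize=1000).
        conf.setInt("hbase.mob.file.cache.size", 1000);
        System.out.println(conf.getFloat("hfile.block.cache.size", 0.0f));
      }
    }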
2024-11-21T00:28:55,673 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:55,676 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/hbase.id] with ID: 23bbf42b-2379-405b-a2dd-f46ca66079c9 2024-11-21T00:28:55,676 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/.tmp/hbase.id 2024-11-21T00:28:55,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741826_1002 (size=42) 2024-11-21T00:28:56,080 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/.tmp/hbase.id]:[hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/hbase.id] 2024-11-21T00:28:56,088 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:28:56,088 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-21T00:28:56,089 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 2024-11-21T00:28:56,104 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741827_1003 (size=196) 2024-11-21T00:28:56,109 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:56,110 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-21T00:28:56,110 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:56,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741828_1004 (size=1189) 2024-11-21T00:28:56,115 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store 2024-11-21T00:28:56,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741829_1005 (size=34) 2024-11-21T00:28:56,521 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:56,521 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:28:56,521 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
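The master:store descriptor spelled out above (families info/proc/rs/state, ROW_INDEX_V1 encoding and ROWCOL blooms on info, 8 KB blocks, in-memory) is built internally by the master, but the same shape can be expressed with the public descriptor builders. A sketch that mirrors just the 'info' family on a hypothetical user table named "demo"; it only builds and prints the descriptor, it does not create anything:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                 // VERSIONS => '3'
            .setInMemory(true)                                 // IN_MEMORY => 'true'
            .setBlocksize(8192)                                // BLOCKSIZE => '8192 B (8KB)'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }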
2024-11-21T00:28:56,521 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:56,521 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:28:56,521 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:56,521 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:28:56,521 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148936521Disabling compacts and flushes for region at 1732148936521Disabling writes for close at 1732148936521Writing region close event to WAL at 1732148936521Closed at 1732148936521 2024-11-21T00:28:56,522 WARN [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/.initializing 2024-11-21T00:28:56,522 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/WALs/5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:56,523 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:56,524 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5ed4808ef0e6%2C37889%2C1732148934945, suffix=, logDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/WALs/5ed4808ef0e6,37889,1732148934945, archiveDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/oldWALs, maxLogs=10 2024-11-21T00:28:56,540 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/WALs/5ed4808ef0e6,37889,1732148934945/5ed4808ef0e6%2C37889%2C1732148934945.1732148936524, exclude list is [], retry=0 2024-11-21T00:28:56,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46487,DS-274b5745-a7ed-469f-82cd-e2d6cecdf978,DISK] 2024-11-21T00:28:56,544 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/WALs/5ed4808ef0e6,37889,1732148934945/5ed4808ef0e6%2C37889%2C1732148934945.1732148936524 2024-11-21T00:28:56,545 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45675:45675)] 2024-11-21T00:28:56,545 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 
'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:56,545 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:56,545 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,545 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-21T00:28:56,554 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:56,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:56,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,555 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 
columnFamilyName proc 2024-11-21T00:28:56,555 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:56,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:56,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-21T00:28:56,557 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:56,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:56,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-21T00:28:56,558 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:56,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:56,558 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,559 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,559 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,560 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,560 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,561 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:56,562 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-21T00:28:56,567 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:56,567 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65251976, jitterRate=-0.027669787406921387}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:56,567 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732148936545Initializing all the Stores at 1732148936546 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148936546Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '65536 B (64KB)'} at 1732148936552 (+6 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148936552Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148936552Cleaning up temporary data from old regions at 1732148936560 (+8 ms)Region opened successfully at 1732148936567 (+7 ms) 2024-11-21T00:28:56,568 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-21T00:28:56,570 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64a69de3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:56,571 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-21T00:28:56,572 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-21T00:28:56,572 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-21T00:28:56,572 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-21T00:28:56,572 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-21T00:28:56,573 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-21T00:28:56,573 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-21T00:28:56,578 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
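The CompactionConfiguration lines earlier in this section (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 with 0.5 jitter) are the stock exploring-compaction defaults. A hedged sketch of the standard keys those values usually come from; the mapping is an inference from the numbers, not something the log states:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact:3
        conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact:10
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.0
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter 0.5
        System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", 0.0f));
      }
    }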
2024-11-21T00:28:56,578 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Unable to get data of znode /2-1051660059/balancer because node does not exist (not necessarily an error) 2024-11-21T00:28:56,630 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-1051660059/balancer already deleted, retry=false 2024-11-21T00:28:56,630 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-21T00:28:56,631 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Unable to get data of znode /2-1051660059/normalizer because node does not exist (not necessarily an error) 2024-11-21T00:28:56,641 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-1051660059/normalizer already deleted, retry=false 2024-11-21T00:28:56,641 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-21T00:28:56,642 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Unable to get data of znode /2-1051660059/switch/split because node does not exist (not necessarily an error) 2024-11-21T00:28:56,651 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-1051660059/switch/split already deleted, retry=false 2024-11-21T00:28:56,652 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Unable to get data of znode /2-1051660059/switch/merge because node does not exist (not necessarily an error) 2024-11-21T00:28:56,662 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-1051660059/switch/merge already deleted, retry=false 2024-11-21T00:28:56,663 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Unable to get data of znode /2-1051660059/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-21T00:28:56,672 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /2-1051660059/snapshot-cleanup already deleted, retry=false 2024-11-21T00:28:56,683 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/2-1051660059/running 2024-11-21T00:28:56,683 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,683 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/2-1051660059/running 2024-11-21T00:28:56,683 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,685 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=5ed4808ef0e6,37889,1732148934945, sessionid=0x1015acb039c0006, setting cluster-up flag (Was=false) 2024-11-21T00:28:56,704 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,735 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /2-1051660059/flush-table-proc/acquired, /2-1051660059/flush-table-proc/reached, /2-1051660059/flush-table-proc/abort 2024-11-21T00:28:56,736 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:56,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,756 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:56,788 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /2-1051660059/online-snapshot/acquired, /2-1051660059/online-snapshot/reached, /2-1051660059/online-snapshot/abort 2024-11-21T00:28:56,789 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:56,792 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-21T00:28:56,793 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:56,794 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-21T00:28:56,794 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, 
MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-21T00:28:56,794 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5ed4808ef0e6,37889,1732148934945 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=5, maxPoolSize=5 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5ed4808ef0e6:0, corePoolSize=10, maxPoolSize=10 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:56,796 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,801 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:56,801 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-21T00:28:56,802 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:56,802 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-21T00:28:56,808 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732148966808 2024-11-21T00:28:56,808 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-21T00:28:56,809 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-21T00:28:56,809 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-21T00:28:56,809 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-21T00:28:56,809 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-21T00:28:56,809 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-21T00:28:56,812 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:56,821 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-21T00:28:56,821 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-21T00:28:56,821 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-21T00:28:56,821 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner 2024-11-21T00:28:56,822 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-21T00:28:56,822 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-21T00:28:56,822 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148936822,5,FailOnTimeoutGroup] 2024-11-21T00:28:56,823 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148936822,5,FailOnTimeoutGroup] 2024-11-21T00:28:56,823 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,823 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-21T00:28:56,823 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,823 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
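The cleaner chores wired up above (LogsCleaner and HFileCleaner, both on a 600000 ms period, with TTL, replication and snapshot delegates) are the standard archive janitors. A hedged sketch of the knobs that usually govern them; the key names are standard HBase properties, assumed here rather than read out of this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // How often the LogsCleaner/HFileCleaner chores wake up (ms); 600000 matches the log.
        conf.setInt("hbase.master.cleaner.interval", 600000);
        // Minimum age before archived WALs and HFiles become eligible for deletion.
        conf.setLong("hbase.master.logcleaner.ttl", 600000L);
        conf.setLong("hbase.master.hfilecleaner.ttl", 300000L);
        // Delegate cleaners (TimeToLive*, ReplicationLogCleaner, SnapshotHFileCleaner, ...)
        // come from the plugin lists; printing the default shows what ships out of the box.
        System.out.println(conf.get("hbase.master.logcleaner.plugins"));
      }
    }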
2024-11-21T00:28:56,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741831_1007 (size=1321) 2024-11-21T00:28:56,835 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(746): ClusterId : 23bbf42b-2379-405b-a2dd-f46ca66079c9 2024-11-21T00:28:56,835 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-21T00:28:56,841 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-21T00:28:56,841 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-21T00:28:56,852 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-21T00:28:56,852 DEBUG [RS:0;5ed4808ef0e6:35845 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f319825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5ed4808ef0e6/172.17.0.2:0 2024-11-21T00:28:56,865 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5ed4808ef0e6:35845 2024-11-21T00:28:56,865 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-21T00:28:56,865 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-21T00:28:56,865 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(832): About to register with Master. 
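The region server above is about to report for duty to the master; once it registers, its ServerName becomes visible in the cluster metrics that any client can read. A small client-side sketch using the public Admin API (the ZooKeeper quorum set below is a placeholder, not the ensemble from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class LiveServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost");   // placeholder quorum
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Lists the region servers currently registered with the active master.
          System.out.println(admin.getClusterMetrics().getLiveServerMetrics().keySet());
        }
      }
    }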
2024-11-21T00:28:56,865 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(2659): reportForDuty to master=5ed4808ef0e6,37889,1732148934945 with port=35845, startcode=1732148935107 2024-11-21T00:28:56,866 DEBUG [RS:0;5ed4808ef0e6:35845 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-21T00:28:56,867 INFO [HMaster-EventLoopGroup-49-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36291, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.23 (auth:SIMPLE), service=RegionServerStatusService 2024-11-21T00:28:56,868 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37889 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:56,868 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37889 {}] master.ServerManager(517): Registering regionserver=5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:56,870 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88 2024-11-21T00:28:56,870 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43965 2024-11-21T00:28:56,870 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-21T00:28:56,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059/rs 2024-11-21T00:28:56,916 DEBUG [RS:0;5ed4808ef0e6:35845 {}] zookeeper.ZKUtil(111): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on existing znode=/2-1051660059/rs/5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:56,917 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5ed4808ef0e6,35845,1732148935107] 2024-11-21T00:28:56,917 WARN [RS:0;5ed4808ef0e6:35845 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-21T00:28:56,917 INFO [RS:0;5ed4808ef0e6:35845 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:56,917 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/WALs/5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:56,934 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-21T00:28:56,936 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-21T00:28:56,937 INFO [RS:0;5ed4808ef0e6:35845 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-21T00:28:56,937 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:56,944 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-21T00:28:56,945 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-21T00:28:56,945 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,945 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,945 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,945 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,945 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,945 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,945 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=2, maxPoolSize=2 2024-11-21T00:28:56,945 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,946 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,946 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,946 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,946 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,946 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5ed4808ef0e6:0, corePoolSize=1, maxPoolSize=1 2024-11-21T00:28:56,946 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:56,946 DEBUG [RS:0;5ed4808ef0e6:35845 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5ed4808ef0e6:0, corePoolSize=3, maxPoolSize=3 2024-11-21T00:28:56,960 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-21T00:28:56,960 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,960 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,960 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,960 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,960 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35845,1732148935107-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:56,979 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-21T00:28:56,980 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,35845,1732148935107-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,980 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,980 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.Replication(171): 5ed4808ef0e6,35845,1732148935107 started 2024-11-21T00:28:56,999 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:56,999 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1482): Serving as 5ed4808ef0e6,35845,1732148935107, RpcServer on 5ed4808ef0e6/172.17.0.2:35845, sessionid=0x1015acb039c0007 2024-11-21T00:28:56,999 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-21T00:28:56,999 DEBUG [RS:0;5ed4808ef0e6:35845 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:56,999 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,35845,1732148935107' 2024-11-21T00:28:56,999 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/2-1051660059/flush-table-proc/abort' 2024-11-21T00:28:56,999 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/2-1051660059/flush-table-proc/acquired' 2024-11-21T00:28:57,000 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-21T00:28:57,000 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-21T00:28:57,000 DEBUG [RS:0;5ed4808ef0e6:35845 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:57,000 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5ed4808ef0e6,35845,1732148935107' 2024-11-21T00:28:57,000 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/2-1051660059/online-snapshot/abort' 
2024-11-21T00:28:57,000 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/2-1051660059/online-snapshot/acquired' 2024-11-21T00:28:57,000 DEBUG [RS:0;5ed4808ef0e6:35845 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-21T00:28:57,000 INFO [RS:0;5ed4808ef0e6:35845 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-21T00:28:57,000 INFO [RS:0;5ed4808ef0e6:35845 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-21T00:28:57,104 INFO [RS:0;5ed4808ef0e6:35845 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-21T00:28:57,107 INFO [RS:0;5ed4808ef0e6:35845 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C35845%2C1732148935107, suffix=, logDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/WALs/5ed4808ef0e6,35845,1732148935107, archiveDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/oldWALs, maxLogs=10 2024-11-21T00:28:57,126 DEBUG [RS:0;5ed4808ef0e6:35845 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/WALs/5ed4808ef0e6,35845,1732148935107/5ed4808ef0e6%2C35845%2C1732148935107.1732148937107, exclude list is [], retry=0 2024-11-21T00:28:57,131 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46487,DS-274b5745-a7ed-469f-82cd-e2d6cecdf978,DISK] 2024-11-21T00:28:57,147 INFO [RS:0;5ed4808ef0e6:35845 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/WALs/5ed4808ef0e6,35845,1732148935107/5ed4808ef0e6%2C35845%2C1732148935107.1732148937107 2024-11-21T00:28:57,148 DEBUG [RS:0;5ed4808ef0e6:35845 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45675:45675)] 2024-11-21T00:28:57,224 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-21T00:28:57,224 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS 
=> '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88 2024-11-21T00:28:57,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741833_1009 (size=32) 2024-11-21T00:28:57,453 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:28:57,454 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,455 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,517 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,518 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,535 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:28:57,633 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:57,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:57,645 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:57,645 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:57,645 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:57,646 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:57,647 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 
2024-11-21T00:28:57,647 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:57,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:57,647 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:57,649 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:57,649 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:57,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:57,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-21T00:28:57,651 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:57,651 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:57,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:57,651 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:57,652 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740 2024-11-21T00:28:57,653 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740 2024-11-21T00:28:57,656 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:57,656 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:57,657 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-21T00:28:57,658 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:57,666 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:57,668 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68719035, jitterRate=0.023993417620658875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:57,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732148937633Initializing all the Stores at 1732148937633Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148937633Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148937636 (+3 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148937636Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148937636Cleaning up temporary data from old 
regions at 1732148937656 (+20 ms)Region opened successfully at 1732148937668 (+12 ms) 2024-11-21T00:28:57,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:28:57,668 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:28:57,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:28:57,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:28:57,668 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:28:57,672 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:28:57,672 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148937668Disabling compacts and flushes for region at 1732148937668Disabling writes for close at 1732148937668Writing region close event to WAL at 1732148937672 (+4 ms)Closed at 1732148937672 2024-11-21T00:28:57,683 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:57,683 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-21T00:28:57,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-21T00:28:57,694 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:57,695 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-21T00:28:57,848 DEBUG [5ed4808ef0e6:37889 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-21T00:28:57,849 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:57,853 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,35845,1732148935107, state=OPENING 2024-11-21T00:28:57,904 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-21T00:28:57,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:57,916 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:28:57,917 DEBUG [PEWorker-3 {}] 
procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-21T00:28:57,917 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35845,1732148935107}] 2024-11-21T00:28:57,918 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-1051660059/meta-region-server: CHANGED 2024-11-21T00:28:57,920 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-1051660059/meta-region-server: CHANGED 2024-11-21T00:28:58,072 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:28:58,073 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52573, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:28:58,084 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-21T00:28:58,084 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:28:58,085 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-21T00:28:58,107 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C35845%2C1732148935107.meta, suffix=.meta, logDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/WALs/5ed4808ef0e6,35845,1732148935107, archiveDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/oldWALs, maxLogs=10 2024-11-21T00:28:58,133 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/WALs/5ed4808ef0e6,35845,1732148935107/5ed4808ef0e6%2C35845%2C1732148935107.meta.1732148938107.meta, exclude list is [], retry=0 2024-11-21T00:28:58,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46487,DS-274b5745-a7ed-469f-82cd-e2d6cecdf978,DISK] 2024-11-21T00:28:58,157 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/WALs/5ed4808ef0e6,35845,1732148935107/5ed4808ef0e6%2C35845%2C1732148935107.meta.1732148938107.meta 2024-11-21T00:28:58,160 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45675:45675)] 2024-11-21T00:28:58,160 DEBUG 
[RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:58,161 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:58,161 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:28:58,161 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-21T00:28:58,161 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-21T00:28:58,161 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-21T00:28:58,161 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:58,161 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-21T00:28:58,161 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-21T00:28:58,164 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-21T00:28:58,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-21T00:28:58,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:58,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-21T00:28:58,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-21T00:28:58,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:58,176 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-21T00:28:58,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-21T00:28:58,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:58,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 
2024-11-21T00:28:58,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-21T00:28:58,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-21T00:28:58,190 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-21T00:28:58,191 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740 2024-11-21T00:28:58,192 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740 2024-11-21T00:28:58,197 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-21T00:28:58,197 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-21T00:28:58,197 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-21T00:28:58,199 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-21T00:28:58,201 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66224454, jitterRate=-0.013178735971450806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-21T00:28:58,201 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-21T00:28:58,201 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732148938162Writing region info on filesystem at 1732148938162Initializing all the Stores at 1732148938162Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148938162Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148938164 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148938164Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732148938164Cleaning up temporary data from old regions at 1732148938197 (+33 ms)Running coprocessor post-open hooks at 1732148938201 (+4 ms)Region opened successfully at 1732148938201 2024-11-21T00:28:58,202 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732148938072 2024-11-21T00:28:58,204 DEBUG [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-21T00:28:58,204 INFO [RS_OPEN_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-21T00:28:58,205 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:58,206 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5ed4808ef0e6,35845,1732148935107, state=OPEN 2024-11-21T00:28:58,216 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/2-1051660059/meta-region-server 2024-11-21T00:28:58,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-1051660059/meta-region-server: CHANGED 2024-11-21T00:28:58,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/2-1051660059/meta-region-server 2024-11-21T00:28:58,217 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /2-1051660059/meta-region-server: CHANGED 2024-11-21T00:28:58,217 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35845,1732148935107 2024-11-21T00:28:58,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-21T00:28:58,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=5ed4808ef0e6,35845,1732148935107 in 300 msec 2024-11-21T00:28:58,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-21T00:28:58,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 538 msec 2024-11-21T00:28:58,223 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-21T00:28:58,223 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-21T00:28:58,224 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:58,224 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35845,1732148935107, seqNum=-1] 2024-11-21T00:28:58,224 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:58,225 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41621, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:58,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4360 sec 2024-11-21T00:28:58,229 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732148938229, completionTime=-1 
2024-11-21T00:28:58,229 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-21T00:28:58,230 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=1 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732148998232 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732149058232 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37889,1732148934945-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37889,1732148934945-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37889,1732148934945-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5ed4808ef0e6:37889, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:58,232 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:58,235 DEBUG [master/5ed4808ef0e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-21T00:28:58,237 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:58,240 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.567sec 2024-11-21T00:28:58,240 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-21T00:28:58,241 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-21T00:28:58,241 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-21T00:28:58,241 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-21T00:28:58,241 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-21T00:28:58,241 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37889,1732148934945-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-21T00:28:58,241 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37889,1732148934945-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-21T00:28:58,245 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-21T00:28:58,245 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-21T00:28:58,245 INFO [master/5ed4808ef0e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5ed4808ef0e6,37889,1732148934945-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-21T00:28:58,249 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3667c45a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:58,249 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37889,-1 for getting cluster id 2024-11-21T00:28:58,249 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:28:58,250 DEBUG [HMaster-EventLoopGroup-49-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '23bbf42b-2379-405b-a2dd-f46ca66079c9' 2024-11-21T00:28:58,250 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:28:58,250 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "23bbf42b-2379-405b-a2dd-f46ca66079c9" 2024-11-21T00:28:58,251 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3086cbf0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:58,251 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37889,-1] 2024-11-21T00:28:58,251 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:28:58,251 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:28:58,252 INFO [HMaster-EventLoopGroup-49-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59570, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:28:58,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@426bbbb6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:28:58,253 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:28:58,254 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35845,1732148935107, seqNum=-1] 2024-11-21T00:28:58,255 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:28:58,256 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38912, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:28:58,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=5ed4808ef0e6,37889,1732148934945 2024-11-21T00:28:58,258 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=cluster2 connecting to ZooKeeper ensemble=127.0.0.1:62972 2024-11-21T00:28:58,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster20x0, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-21T00:28:58,276 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): cluster2-0x1015acb039c0008 connected 2024-11-21T00:28:58,286 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:58,287 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,40563,1732148924943 2024-11-21T00:28:58,287 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@228ef502 2024-11-21T00:28:58,287 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:58,288 INFO [HMaster-EventLoopGroup-45-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50238, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:58,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:58,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] 
procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:28:58,292 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:58,292 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:28:58,293 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:58,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:58,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:28:58,370 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0c91503478896a32e34433cb639122e5, NAME => 'test,,1732148938288.0c91503478896a32e34433cb639122e5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd 2024-11-21T00:28:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:58,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:28:58,433 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148938288.0c91503478896a32e34433cb639122e5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:58,433 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing 0c91503478896a32e34433cb639122e5, disabling compactions & flushes 
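
The HMaster$4(2454) entry above is the master receiving a create 'test' request with three column families: 'f' and 'f1' carry REPLICATION_SCOPE => '1' while 'norep' carries REPLICATION_SCOPE => '0'; the repeated MasterRpcServices(1377) "Checking to see if procedure is done pid=4" entries are the client polling the CreateTableProcedure. As a hedged illustration only (this is not the test's own code), the same table could be declared from a client roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
          // 'f' and 'f1' are replicated (scope 1); 'norep' is not (scope 0).
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
              .setMaxVersions(1).setScope(1).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
              .setMaxVersions(1).setScope(1).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
              .setMaxVersions(1).setScope(0).build())
          .build();
      admin.createTable(desc); // blocks until the CreateTableProcedure finishes
    }
  }
}
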
2024-11-21T00:28:58,433 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:28:58,433 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:28:58,433 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148938288.0c91503478896a32e34433cb639122e5. after waiting 0 ms 2024-11-21T00:28:58,433 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:28:58,433 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:28:58,433 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0c91503478896a32e34433cb639122e5: Waiting for close lock at 1732148938433Disabling compacts and flushes for region at 1732148938433Disabling writes for close at 1732148938433Writing region close event to WAL at 1732148938433Closed at 1732148938433 2024-11-21T00:28:58,435 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:58,436 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148938288.0c91503478896a32e34433cb639122e5.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148938435"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148938435"}]},"ts":"1732148938435"} 2024-11-21T00:28:58,439 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
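
The MetaTableAccessor(964) Put above is the master writing the new region's info:regioninfo and info:state cells into hbase:meta during CREATE_TABLE_ADD_TO_META. MetaTableAccessor is used on the master side here, but the same rows can be inspected from a client by scanning the meta table; a minimal sketch, assuming an already-open Connection named conn:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaPeek {
  // Print the info:state cell for each hbase:meta row belonging to table 'test'.
  static void printTestRegions(Connection conn) throws IOException {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
      for (Result r : scanner) {
        String row = Bytes.toString(r.getRow());
        if (row.startsWith("test,")) {
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          System.out.println(row + " -> " + (state == null ? "?" : Bytes.toString(state)));
        }
      }
    }
  }
}
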
2024-11-21T00:28:58,441 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:58,441 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148938441"}]},"ts":"1732148938441"} 2024-11-21T00:28:58,444 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:28:58,445 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=0c91503478896a32e34433cb639122e5, ASSIGN}] 2024-11-21T00:28:58,446 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=0c91503478896a32e34433cb639122e5, ASSIGN 2024-11-21T00:28:58,447 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=0c91503478896a32e34433cb639122e5, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,38737,1732148925192; forceNewPlan=false, retain=false 2024-11-21T00:28:58,601 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0c91503478896a32e34433cb639122e5, regionState=OPENING, regionLocation=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:58,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:58,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=0c91503478896a32e34433cb639122e5, ASSIGN because future has completed 2024-11-21T00:28:58,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c91503478896a32e34433cb639122e5, server=5ed4808ef0e6,38737,1732148925192}] 2024-11-21T00:28:58,790 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:28:58,791 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0c91503478896a32e34433cb639122e5, NAME => 'test,,1732148938288.0c91503478896a32e34433cb639122e5.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:58,791 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:58,791 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
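
The TransitRegionStateProcedure/OpenRegionProcedure entries above move the new region from OFFLINE to OPEN on 5ed4808ef0e6,38737. Once the region is open, a client can resolve its location through the RegionLocator API; a hedged sketch (again assuming an open Connection named conn, not code from the test):

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class LocateRegions {
  static void printLocations(Connection conn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("test"))) {
      // One entry per region; for this table there is a single region spanning ('', '').
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
      }
    }
  }
}
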
2024-11-21T00:28:58,791 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,791 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148938288.0c91503478896a32e34433cb639122e5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:58,791 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,791 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,823 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,827 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c91503478896a32e34433cb639122e5 columnFamilyName f 2024-11-21T00:28:58,827 DEBUG [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,833 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] regionserver.HStore(327): Store=0c91503478896a32e34433cb639122e5/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:58,833 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,838 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c91503478896a32e34433cb639122e5 columnFamilyName f1 2024-11-21T00:28:58,838 DEBUG [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,839 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] regionserver.HStore(327): Store=0c91503478896a32e34433cb639122e5/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:58,840 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,841 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c91503478896a32e34433cb639122e5 columnFamilyName norep 2024-11-21T00:28:58,841 DEBUG [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,841 INFO [StoreOpener-0c91503478896a32e34433cb639122e5-1 {}] regionserver.HStore(327): Store=0c91503478896a32e34433cb639122e5/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:58,841 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,842 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,842 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,844 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,844 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,845 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:28:58,846 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,860 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:58,861 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0c91503478896a32e34433cb639122e5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61149096, jitterRate=-0.08880746364593506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:58,861 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0c91503478896a32e34433cb639122e5 2024-11-21T00:28:58,861 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0c91503478896a32e34433cb639122e5: Running coprocessor pre-open hook at 1732148938792Writing region info on filesystem at 1732148938792Initializing all the Stores at 1732148938804 (+12 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148938804Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148938823 (+19 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148938823Cleaning up temporary data from old regions at 1732148938844 (+21 ms)Running coprocessor post-open hooks at 1732148938861 (+17 ms)Region opened successfully at 1732148938861 2024-11-21T00:28:58,862 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148938288.0c91503478896a32e34433cb639122e5., pid=6, 
masterSystemTime=1732148938778 2024-11-21T00:28:58,864 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:28:58,864 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0c91503478896a32e34433cb639122e5, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:28:58,864 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:28:58,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c91503478896a32e34433cb639122e5, server=5ed4808ef0e6,38737,1732148925192 because future has completed 2024-11-21T00:28:58,878 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:28:58,881 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:58,881 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148938881"}]},"ts":"1732148938881"} 2024-11-21T00:28:58,880 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:28:58,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=0c91503478896a32e34433cb639122e5, ASSIGN in 434 msec 2024-11-21T00:28:58,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0c91503478896a32e34433cb639122e5, server=5ed4808ef0e6,38737,1732148925192 in 256 msec 2024-11-21T00:28:58,883 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:28:58,883 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:58,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 594 msec 2024-11-21T00:28:58,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:58,918 INFO [RPCClient-NioEventLoopGroup-4-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:28:58,918 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:28:58,919 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37585,1732148929254 2024-11-21T00:28:58,919 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@65ac3477 2024-11-21T00:28:58,919 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:28:58,920 INFO [HMaster-EventLoopGroup-47-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48528, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:28:58,921 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:28:58,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:28:58,931 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:28:58,931 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:58,932 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:28:58,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:58,939 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:28:59,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:28:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:59,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:59,417 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e4156432cf81317006fd52a2ffca50a1, NAME => 
'test,,1732148938920.e4156432cf81317006fd52a2ffca50a1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314 2024-11-21T00:28:59,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:28:59,491 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148938920.e4156432cf81317006fd52a2ffca50a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:59,491 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing e4156432cf81317006fd52a2ffca50a1, disabling compactions & flushes 2024-11-21T00:28:59,491 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:28:59,491 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:28:59,491 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. after waiting 0 ms 2024-11-21T00:28:59,491 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:28:59,491 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 
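
The preceding entries replay the same create 'test' request against the second mini-cluster's master (port 37585), and the client.RawAsyncHBaseAdmin entries elsewhere in this log show the test driving these DDLs through the asynchronous admin API, polling MasterRpcServices until pid=4 completes. A hedged sketch of that client path, not taken from the test itself (only one family shown for brevity):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AsyncCreateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() blocks for the sketch.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
          .build();
      // The returned future completes when the master reports the CreateTableProcedure done.
      admin.createTable(desc).get();
    }
  }
}
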
2024-11-21T00:28:59,491 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for e4156432cf81317006fd52a2ffca50a1: Waiting for close lock at 1732148939491Disabling compacts and flushes for region at 1732148939491Disabling writes for close at 1732148939491Writing region close event to WAL at 1732148939491Closed at 1732148939491 2024-11-21T00:28:59,492 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:28:59,493 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148938920.e4156432cf81317006fd52a2ffca50a1.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148939492"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148939492"}]},"ts":"1732148939492"} 2024-11-21T00:28:59,495 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:28:59,496 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:28:59,496 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148939496"}]},"ts":"1732148939496"} 2024-11-21T00:28:59,498 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:28:59,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=e4156432cf81317006fd52a2ffca50a1, ASSIGN}] 2024-11-21T00:28:59,501 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=e4156432cf81317006fd52a2ffca50a1, ASSIGN 2024-11-21T00:28:59,502 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=e4156432cf81317006fd52a2ffca50a1, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,40747,1732148929411; forceNewPlan=false, retain=false 2024-11-21T00:28:59,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:28:59,656 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4156432cf81317006fd52a2ffca50a1, regionState=OPENING, regionLocation=5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:59,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-48-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=e4156432cf81317006fd52a2ffca50a1, ASSIGN because future has completed 2024-11-21T00:28:59,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4156432cf81317006fd52a2ffca50a1, server=5ed4808ef0e6,40747,1732148929411}] 2024-11-21T00:28:59,852 INFO 
[RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:28:59,852 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => e4156432cf81317006fd52a2ffca50a1, NAME => 'test,,1732148938920.e4156432cf81317006fd52a2ffca50a1.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:28:59,853 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:28:59,853 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 2024-11-21T00:28:59,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148938920.e4156432cf81317006fd52a2ffca50a1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:28:59,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,868 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,878 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4156432cf81317006fd52a2ffca50a1 columnFamilyName f 2024-11-21T00:28:59,878 DEBUG [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:59,879 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] regionserver.HStore(327): Store=e4156432cf81317006fd52a2ffca50a1/f, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:59,879 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,883 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4156432cf81317006fd52a2ffca50a1 columnFamilyName f1 2024-11-21T00:28:59,883 DEBUG [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:59,887 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] regionserver.HStore(327): Store=e4156432cf81317006fd52a2ffca50a1/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:59,887 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,897 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4156432cf81317006fd52a2ffca50a1 columnFamilyName norep 2024-11-21T00:28:59,897 DEBUG [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:28:59,899 INFO [StoreOpener-e4156432cf81317006fd52a2ffca50a1-1 {}] regionserver.HStore(327): Store=e4156432cf81317006fd52a2ffca50a1/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:28:59,900 DEBUG 
[RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,900 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,901 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,901 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,901 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,902 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:28:59,904 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,913 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:28:59,914 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened e4156432cf81317006fd52a2ffca50a1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61286414, jitterRate=-0.08676126599311829}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:28:59,915 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:28:59,915 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for e4156432cf81317006fd52a2ffca50a1: Running coprocessor pre-open hook at 1732148939853Writing region info on filesystem at 1732148939853Initializing all the Stores at 1732148939857 (+4 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148939857Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148939868 (+11 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148939868Cleaning up temporary data from old regions at 1732148939901 (+33 ms)Running coprocessor post-open hooks at 1732148939915 (+14 ms)Region opened successfully at 1732148939915 2024-11-21T00:28:59,920 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148938920.e4156432cf81317006fd52a2ffca50a1., pid=6, masterSystemTime=1732148939834 2024-11-21T00:28:59,927 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=e4156432cf81317006fd52a2ffca50a1, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,40747,1732148929411 2024-11-21T00:28:59,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-48-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4156432cf81317006fd52a2ffca50a1, server=5ed4808ef0e6,40747,1732148929411 because future has completed 2024-11-21T00:28:59,932 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:28:59,932 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 
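
The CompactionConfiguration(183) entries printed for every store above (minCompactSize:128 MB, minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000) echo the default values of standard configuration keys; nothing table-specific is set in this test. A hedged sketch of the keys behind those numbers (the values shown are simply the defaults reported by the log, not recommendations):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum/maximum number of store files considered per minor compaction
    // (log: minFilesToCompact:3, maxFilesToCompact:10).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Selection ratio used by the exploring policy (log: ratio 1.200000).
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Major compaction period in ms (log: major period 604800000, i.e. 7 days).
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
  }
}
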
2024-11-21T00:28:59,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:28:59,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure e4156432cf81317006fd52a2ffca50a1, server=5ed4808ef0e6,40747,1732148929411 in 264 msec 2024-11-21T00:28:59,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:28:59,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=e4156432cf81317006fd52a2ffca50a1, ASSIGN in 446 msec 2024-11-21T00:28:59,950 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:28:59,950 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148939950"}]},"ts":"1732148939950"} 2024-11-21T00:28:59,952 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:28:59,957 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:28:59,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 1.0360 sec 2024-11-21T00:29:00,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37585 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:29:00,078 INFO [RPCClient-NioEventLoopGroup-4-10 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:29:00,078 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:00,079 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37889,1732148934945 2024-11-21T00:29:00,079 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@68340164 2024-11-21T00:29:00,079 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:00,080 INFO [HMaster-EventLoopGroup-49-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59578, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:00,081 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'test', {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:29:00,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=test 2024-11-21T00:29:00,084 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:29:00,084 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:00,085 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "test" procId is: 4 2024-11-21T00:29:00,085 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:29:00,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:29:00,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741835_1011 (size=902) 2024-11-21T00:29:00,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:29:00,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:29:00,518 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 31faf65a546f672b94789a29bee40c63, NAME => 'test,,1732148940081.31faf65a546f672b94789a29bee40c63.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B 
(64KB)'}, regionDir=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88 2024-11-21T00:29:00,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741836_1012 (size=39) 2024-11-21T00:29:00,532 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(898): Instantiated test,,1732148940081.31faf65a546f672b94789a29bee40c63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:00,532 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1722): Closing 31faf65a546f672b94789a29bee40c63, disabling compactions & flushes 2024-11-21T00:29:00,532 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1755): Closing region test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:00,532 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:00,532 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on test,,1732148940081.31faf65a546f672b94789a29bee40c63. after waiting 0 ms 2024-11-21T00:29:00,532 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:00,532 INFO [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1973): Closed test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:00,532 DEBUG [RegionOpenAndInit-test-pool-0 {}] regionserver.HRegion(1676): Region close journal for 31faf65a546f672b94789a29bee40c63: Waiting for close lock at 1732148940532Disabling compacts and flushes for region at 1732148940532Disabling writes for close at 1732148940532Writing region close event to WAL at 1732148940532Closed at 1732148940532 2024-11-21T00:29:00,533 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:29:00,533 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"test,,1732148940081.31faf65a546f672b94789a29bee40c63.","families":{"info":[{"qualifier":"regioninfo","vlen":38,"tag":[],"timestamp":"1732148940533"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148940533"}]},"ts":"1732148940533"} 2024-11-21T00:29:00,535 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
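
At this point the third mini-cluster's master (port 37889) has added its copy of 'test' to hbase:meta and is about to assign it; the surrounding MasterRpcServices(1377) entries are again the client polling the procedure. From a client's perspective, readiness is normally checked with Admin.isTableAvailable rather than by reading the log; a hedged sketch assuming an open Connection named conn:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class WaitForTable {
  // Poll until every region of 'test' is assigned and reachable.
  static void waitUntilAvailable(Connection conn) throws IOException, InterruptedException {
    try (Admin admin = conn.getAdmin()) {
      while (!admin.isTableAvailable(TableName.valueOf("test"))) {
        Thread.sleep(100);
      }
    }
  }
}
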
2024-11-21T00:29:00,536 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:29:00,536 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148940536"}]},"ts":"1732148940536"} 2024-11-21T00:29:00,537 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLING in hbase:meta 2024-11-21T00:29:00,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=31faf65a546f672b94789a29bee40c63, ASSIGN}] 2024-11-21T00:29:00,539 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=test, region=31faf65a546f672b94789a29bee40c63, ASSIGN 2024-11-21T00:29:00,539 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=test, region=31faf65a546f672b94789a29bee40c63, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,35845,1732148935107; forceNewPlan=false, retain=false 2024-11-21T00:29:00,690 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=31faf65a546f672b94789a29bee40c63, regionState=OPENING, regionLocation=5ed4808ef0e6,35845,1732148935107 2024-11-21T00:29:00,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=test, region=31faf65a546f672b94789a29bee40c63, ASSIGN because future has completed 2024-11-21T00:29:00,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31faf65a546f672b94789a29bee40c63, server=5ed4808ef0e6,35845,1732148935107}] 2024-11-21T00:29:00,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:29:00,852 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:00,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 31faf65a546f672b94789a29bee40c63, NAME => 'test,,1732148940081.31faf65a546f672b94789a29bee40c63.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:29:00,853 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 2024-11-21T00:29:00,853 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter loaded, priority=536870911. 
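For orientation: the records above trace CreateTableProcedure pid=4 building table 'test' with column families 'f' and 'f1' at REPLICATION_SCOPE => '1' and 'norep' at REPLICATION_SCOPE => '0', then handing off to region assignment. A minimal client-side sketch that would request an equivalent descriptor through the HBase Admin API follows; only the table name, family names, and replication scopes are taken from the log, while the class name, connection setup, and reliance on an hbase-site.xml on the classpath are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateReplicatedTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // cluster endpoints assumed to come from hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Families 'f' and 'f1' are replicated (scope 1); 'norep' is not (scope 0), matching the descriptor in the log.
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
              .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
              .setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("norep"))
              .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
          .build();
      // On the master this request is executed as a CreateTableProcedure, as traced above.
      admin.createTable(td);
    }
  }
}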
2024-11-21T00:29:00,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table test 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated test,,1732148940081.31faf65a546f672b94789a29bee40c63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:00,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,853 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,860 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f of region 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,861 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31faf65a546f672b94789a29bee40c63 columnFamilyName f 2024-11-21T00:29:00,861 DEBUG [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:00,862 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] regionserver.HStore(327): Store=31faf65a546f672b94789a29bee40c63/f, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:00,862 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family f1 of region 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,863 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31faf65a546f672b94789a29bee40c63 columnFamilyName f1 2024-11-21T00:29:00,863 DEBUG [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:00,864 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] regionserver.HStore(327): Store=31faf65a546f672b94789a29bee40c63/f1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:00,864 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family norep of region 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,865 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31faf65a546f672b94789a29bee40c63 columnFamilyName norep 2024-11-21T00:29:00,865 DEBUG [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:00,870 INFO [StoreOpener-31faf65a546f672b94789a29bee40c63-1 {}] regionserver.HStore(327): Store=31faf65a546f672b94789a29bee40c63/norep, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:00,870 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,871 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,878 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,881 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,881 
DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,882 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:29:00,883 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,889 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:29:00,890 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 31faf65a546f672b94789a29bee40c63; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65827637, jitterRate=-0.01909177005290985}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:29:00,890 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:00,890 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 31faf65a546f672b94789a29bee40c63: Running coprocessor pre-open hook at 1732148940854Writing region info on filesystem at 1732148940854Initializing all the Stores at 1732148940858 (+4 ms)Instantiating store for column family {NAME => 'f', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148940858Instantiating store for column family {NAME => 'f1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '1', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148940860 (+2 ms)Instantiating store for column family {NAME => 'norep', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148940860Cleaning up temporary data from old regions at 1732148940882 (+22 ms)Running coprocessor post-open hooks at 1732148940890 (+8 ms)Region opened successfully at 1732148940890 2024-11-21T00:29:00,892 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for test,,1732148940081.31faf65a546f672b94789a29bee40c63., pid=6, 
masterSystemTime=1732148940848 2024-11-21T00:29:00,894 DEBUG [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:00,894 INFO [RS_OPEN_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:00,895 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=31faf65a546f672b94789a29bee40c63, regionState=OPEN, repBarrier=2, openSeqNum=2, regionLocation=5ed4808ef0e6,35845,1732148935107 2024-11-21T00:29:00,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31faf65a546f672b94789a29bee40c63, server=5ed4808ef0e6,35845,1732148935107 because future has completed 2024-11-21T00:29:00,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-21T00:29:00,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 31faf65a546f672b94789a29bee40c63, server=5ed4808ef0e6,35845,1732148935107 in 218 msec 2024-11-21T00:29:00,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-21T00:29:00,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=test, region=31faf65a546f672b94789a29bee40c63, ASSIGN in 375 msec 2024-11-21T00:29:00,916 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:29:00,916 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"test","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148940916"}]},"ts":"1732148940916"} 2024-11-21T00:29:00,918 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=test, state=ENABLED in hbase:meta 2024-11-21T00:29:00,919 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=test execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:29:00,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=test in 838 msec 2024-11-21T00:29:01,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37889 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-21T00:29:01,228 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:test completed 2024-11-21T00:29:01,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b90cf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:01,229 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,40563,-1 for getting cluster 
id 2024-11-21T00:29:01,229 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:01,230 DEBUG [HMaster-EventLoopGroup-45-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2769c2a5-6298-47aa-8583-d1e5652dbcb6' 2024-11-21T00:29:01,230 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:01,230 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2769c2a5-6298-47aa-8583-d1e5652dbcb6" 2024-11-21T00:29:01,231 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b0c6a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:01,231 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,40563,-1] 2024-11-21T00:29:01,231 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:01,231 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:01,232 INFO [HMaster-EventLoopGroup-45-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:01,232 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34048f2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:01,233 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:01,234 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,40563,1732148924943 2024-11-21T00:29:01,234 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@67c50218 2024-11-21T00:29:01,234 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:01,236 INFO [HMaster-EventLoopGroup-45-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:01,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=1, config=clusterKey=hbase+rpc://5ed4808ef0e6:37585,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:29:01,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:29:01,241 DEBUG [PEWorker-1 {}] 
client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:37585' 2024-11-21T00:29:01,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:29:01,255 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a5a4b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:01,255 DEBUG [PEWorker-1 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37585,-1 for getting cluster id 2024-11-21T00:29:01,255 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:01,258 DEBUG [HMaster-EventLoopGroup-47-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3f68a05-0b79-44d8-b908-8e41661cca8a' 2024-11-21T00:29:01,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:01,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3f68a05-0b79-44d8-b908-8e41661cca8a" 2024-11-21T00:29:01,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fbea77e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:01,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37585,-1] 2024-11-21T00:29:01,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:01,259 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:01,260 INFO [HMaster-EventLoopGroup-47-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48540, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:01,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76099496, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:01,261 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:01,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37585,1732148929254 2024-11-21T00:29:01,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5c20e7db 2024-11-21T00:29:01,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-11-21T00:29:01,263 INFO [HMaster-EventLoopGroup-47-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48552, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:01,263 INFO [PEWorker-1 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-1. 2024-11-21T00:29:01,263 DEBUG [PEWorker-1 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:29:01,263 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:01,264 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
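The AddPeerProcedure stored above as pid=7 first validates the peer definition; checkClusterKey opens a short-lived connection to the target bootstrap server and closes it again, which is why the intentional close and its call stack are logged at DEBUG. A hedged sketch of the client call that registers such a peer, reusing the peer id '1' and the clusterKey string from the log (the class name, connection setup, and explicit setReplicateAllUserTables call are assumptions consistent with the logged config replicateAllUserTables=true, bandwidth=0, serial=false), could look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddReplicationPeerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // source-cluster configuration assumed
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Peer id and URI-style cluster key are taken verbatim from the log above.
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey("hbase+rpc://5ed4808ef0e6:37585")
          .setReplicateAllUserTables(true)
          .build();
      admin.addReplicationPeer("1", peerConfig); // drives the AddPeerProcedure / RefreshPeerProcedure seen later in the log
    }
  }
}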
2024-11-21T00:29:01,264 DEBUG [PEWorker-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:01,264 INFO [PEWorker-1 {}] master.HMaster(2490): Client=null/null create 'hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix'}}}, {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-21T00:29:01,265 DEBUG [PEWorker-1 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:replication 2024-11-21T00:29:01,274 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_PRE_OPERATION 2024-11-21T00:29:01,274 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:01,276 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-21T00:29:01,316 DEBUG [PEWorker-1 {}] procedure.ProcedureSyncWait(219): waitFor Creating table hbase:replication 2024-11-21T00:29:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:29:01,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741837_1013 (size=1138) 2024-11-21T00:29:01,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:29:01,803 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1ebb67afce9ecc21dff27310bd89573a, NAME => 'hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:replication', {TABLE_ATTRIBUTES => {coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.regionserver.region.split_restriction.delimiter' => '-', 'hbase.regionserver.region.split_restriction.type' => 'DelimitedKeyPrefix', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd 2024-11-21T00:29:01,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741838_1014 (size=44) 2024-11-21T00:29:01,888 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:01,888 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1722): Closing 1ebb67afce9ecc21dff27310bd89573a, disabling compactions & flushes 2024-11-21T00:29:01,888 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:01,889 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:01,889 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. after waiting 0 ms 2024-11-21T00:29:01,889 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:01,889 INFO [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1973): Closed hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 
2024-11-21T00:29:01,889 DEBUG [RegionOpenAndInit-hbase:replication-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1ebb67afce9ecc21dff27310bd89573a: Waiting for close lock at 1732148941888Disabling compacts and flushes for region at 1732148941888Disabling writes for close at 1732148941889 (+1 ms)Writing region close event to WAL at 1732148941889Closed at 1732148941889 2024-11-21T00:29:01,890 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ADD_TO_META 2024-11-21T00:29:01,890 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a.","families":{"info":[{"qualifier":"regioninfo","vlen":43,"tag":[],"timestamp":"1732148941890"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732148941890"}]},"ts":"1732148941890"} 2024-11-21T00:29:01,897 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-21T00:29:01,898 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-21T00:29:01,898 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148941898"}]},"ts":"1732148941898"} 2024-11-21T00:29:01,901 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLING in hbase:meta 2024-11-21T00:29:01,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=1ebb67afce9ecc21dff27310bd89573a, ASSIGN}] 2024-11-21T00:29:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:29:01,908 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=1ebb67afce9ecc21dff27310bd89573a, ASSIGN 2024-11-21T00:29:01,910 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=1ebb67afce9ecc21dff27310bd89573a, ASSIGN; state=OFFLINE, location=5ed4808ef0e6,38737,1732148925192; forceNewPlan=false, retain=false 2024-11-21T00:29:02,061 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1ebb67afce9ecc21dff27310bd89573a, regionState=OPENING, regionLocation=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:29:02,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:replication, region=1ebb67afce9ecc21dff27310bd89573a, ASSIGN because future has completed 2024-11-21T00:29:02,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; 
OpenRegionProcedure 1ebb67afce9ecc21dff27310bd89573a, server=5ed4808ef0e6,38737,1732148925192}] 2024-11-21T00:29:02,248 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(132): Open hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:02,249 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-21T00:29:02,249 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] monitor.StreamSlowMonitor(122): New stream slow monitor rep 2024-11-21T00:29:02,253 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(613): WAL configuration: blocksize=20 KB, rollsize=10 KB, prefix=5ed4808ef0e6%2C38737%2C1732148925192.rep, suffix=, logDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192, archiveDir=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/oldWALs, maxLogs=10 2024-11-21T00:29:02,283 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.rep.1732148942254, exclude list is [], retry=0 2024-11-21T00:29:02,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-823beef4-9d61-40e1-b761-72a26cd5c543,DISK] 2024-11-21T00:29:02,309 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.rep.1732148942254 2024-11-21T00:29:02,310 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43193:43193)] 2024-11-21T00:29:02,310 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 1ebb67afce9ecc21dff27310bd89573a, NAME => 'hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a.', STARTKEY => '', ENDKEY => ''} 2024-11-21T00:29:02,310 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver loaded, priority=536870911. 
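Just above, WALFactory reports instantiating AsyncFSWALProvider for the dedicated 'rep' WAL of the hbase:replication region, with blocksize=20 KB and rollsize=10 KB. As far as I know the provider class is selected through the hbase.wal.provider setting; the short sketch below only illustrates that selection (treating 'asyncfs' as the alias for AsyncFSWALProvider is an assumption, and the block/roll sizes above come from test-specific settings not reproduced here).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative only: request the asynchronous FS WAL implementation named in the WALFactory record above.
    conf.set("hbase.wal.provider", "asyncfs");
    System.out.println("WAL provider alias: " + conf.get("hbase.wal.provider"));
  }
}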
2024-11-21T00:29:02,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-21T00:29:02,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. service=MultiRowMutationService 2024-11-21T00:29:02,311 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:replication successfully. 2024-11-21T00:29:02,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table replication 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(898): Instantiated hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-21T00:29:02,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,317 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family hfileref of region 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,322 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ebb67afce9ecc21dff27310bd89573a columnFamilyName hfileref 2024-11-21T00:29:02,322 DEBUG [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:02,323 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] regionserver.HStore(327): Store=1ebb67afce9ecc21dff27310bd89573a/hfileref, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:02,323 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family queue of region 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,324 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ebb67afce9ecc21dff27310bd89573a columnFamilyName queue 2024-11-21T00:29:02,324 DEBUG [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:02,324 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] regionserver.HStore(327): Store=1ebb67afce9ecc21dff27310bd89573a/queue, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:02,324 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family sid of region 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,325 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1ebb67afce9ecc21dff27310bd89573a columnFamilyName sid 2024-11-21T00:29:02,325 DEBUG [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-21T00:29:02,325 INFO [StoreOpener-1ebb67afce9ecc21dff27310bd89573a-1 {}] regionserver.HStore(327): Store=1ebb67afce9ecc21dff27310bd89573a/sid, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-21T00:29:02,326 
DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,326 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,327 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,327 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,327 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,328 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:replication descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-21T00:29:02,329 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,331 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-21T00:29:02,331 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1114): Opened 1ebb67afce9ecc21dff27310bd89573a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62944211, jitterRate=-0.06205816566944122}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-21T00:29:02,332 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:02,333 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 1ebb67afce9ecc21dff27310bd89573a: Running coprocessor pre-open hook at 1732148942311Writing region info on filesystem at 1732148942311Initializing all the Stores at 1732148942313 (+2 ms)Instantiating store for column family {NAME => 'hfileref', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148942313Instantiating store for column family {NAME => 'queue', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148942317 (+4 ms)Instantiating store for column family {NAME => 'sid', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732148942317Cleaning up temporary data from old regions at 1732148942327 (+10 ms)Running coprocessor post-open hooks at 1732148942332 (+5 ms)Region opened successfully at 1732148942333 (+1 ms) 2024-11-21T00:29:02,334 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a., pid=10, masterSystemTime=1732148942236 2024-11-21T00:29:02,335 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:02,335 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=10}] handler.AssignRegionHandler(153): Opened hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 
2024-11-21T00:29:02,336 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=1ebb67afce9ecc21dff27310bd89573a, regionState=OPEN, openSeqNum=2, regionLocation=5ed4808ef0e6,38737,1732148925192 2024-11-21T00:29:02,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1ebb67afce9ecc21dff27310bd89573a, server=5ed4808ef0e6,38737,1732148925192 because future has completed 2024-11-21T00:29:02,341 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-21T00:29:02,342 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 1ebb67afce9ecc21dff27310bd89573a, server=5ed4808ef0e6,38737,1732148925192 in 261 msec 2024-11-21T00:29:02,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-21T00:29:02,343 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:replication, region=1ebb67afce9ecc21dff27310bd89573a, ASSIGN in 441 msec 2024-11-21T00:29:02,344 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-21T00:29:02,344 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:replication","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732148942344"}]},"ts":"1732148942344"} 2024-11-21T00:29:02,346 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:replication, state=ENABLED in hbase:meta 2024-11-21T00:29:02,347 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=8, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:replication execute state=CREATE_TABLE_POST_OPERATION 2024-11-21T00:29:02,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:replication in 1.0830 sec 2024-11-21T00:29:02,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-', locateType=CURRENT is [region=hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2] 2024-11-21T00:29:02,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:29:02,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:29:02,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:29:02,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE, hasLock=false; 
org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:29:02,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38737 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=11 2024-11-21T00:29:02,617 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=1, type=ADD_PEER 2024-11-21T00:29:02,655 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.ReplicationSource(231): queueId=1-5ed4808ef0e6,38737,1732148925192, ReplicationSource: 1, currentBandwidth=0 2024-11-21T00:29:02,655 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:29:02,655 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38737,1732148925192, seqNum=-1] 2024-11-21T00:29:02,656 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:02,657 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-46-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39063, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=ClientService 2024-11-21T00:29:02,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:replication', row='1-5ed4808ef0e6,38737,1732148925192', locateType=CURRENT is [region=hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2] 2024-11-21T00:29:02,684 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=11}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=11 2024-11-21T00:29:02,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.HMaster(4169): Remote procedure done, pid=11 2024-11-21T00:29:02,691 INFO [PEWorker-5 {}] replication.RefreshPeerProcedure(132): Refresh peer 1 for ADD on 5ed4808ef0e6,38737,1732148925192 suceeded 2024-11-21T00:29:02,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-21T00:29:02,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 242 msec 2024-11-21T00:29:02,696 INFO [PEWorker-5 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 1, config clusterKey=hbase+rpc://5ed4808ef0e6:37585,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:29:02,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 1.4600 sec 2024-11-21T00:29:02,707 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap 
servers='5ed4808ef0e6:37585' 2024-11-21T00:29:02,708 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@d830cd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:02,708 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37585,-1 for getting cluster id 2024-11-21T00:29:02,709 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:02,710 DEBUG [HMaster-EventLoopGroup-47-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3f68a05-0b79-44d8-b908-8e41661cca8a' 2024-11-21T00:29:02,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:02,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3f68a05-0b79-44d8-b908-8e41661cca8a" 2024-11-21T00:29:02,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@44d428a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:02,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37585,-1] 2024-11-21T00:29:02,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:02,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:02,712 INFO [HMaster-EventLoopGroup-47-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40312, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:02,715 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@5d9f617d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:02,716 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:02,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37585,1732148929254 2024-11-21T00:29:02,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.ConnectionUtils(555): The fetched master stub is 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a1f24c8 2024-11-21T00:29:02,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:02,718 INFO [HMaster-EventLoopGroup-47-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40326, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=MasterService 2024-11-21T00:29:02,721 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSource(613): peerId=1, queueId=1-5ed4808ef0e6,38737,1732148925192 (queues=1) is replicating from cluster=2769c2a5-6298-47aa-8583-d1e5652dbcb6 to cluster=d3f68a05-0b79-44d8-b908-8e41661cca8a 2024-11-21T00:29:02,721 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSource(382): peerId=1, starting shipping worker for walGroupId=5ed4808ef0e6%2C38737%2C1732148925192 2024-11-21T00:29:02,721 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceWALReader(111): peerClusterZnode=1-5ed4808ef0e6,38737,1732148925192, ReplicationSourceWALReaderThread : 1 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:29:02,722 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438, startPosition=0, beingWritten=true 2024-11-21T00:29:02,734 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:29:02,741 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C38737%2C1732148925192 2024-11-21T00:29:02,781 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:29:02,781 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader 
hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 379, reset compression=false 2024-11-21T00:29:02,782 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38737,1732148925192 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438, lastWalPosition=379, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:29:03,019 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 379, reset compression=false 2024-11-21T00:29:03,372 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 379, reset compression=false 2024-11-21T00:29:03,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-21T00:29:03,428 INFO [RPCClient-NioEventLoopGroup-4-14 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 1 completed 2024-11-21T00:29:03,432 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
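
The entries above trace AddPeerProcedure and its RefreshPeerProcedure subprocedure completing for peer 1 with an hbase+rpc:// cluster key, after which the client sees "Operation: ADD_REPLICATION_PEER, peerId: 1 completed". For reference, a minimal sketch of driving the same kind of request through the public Admin API is shown below. This is not the test's actual code: the class name, host and port in the cluster key, and the try-with-resources wiring are illustrative; only the peer id "1", the enabled state, and the hbase+rpc:// key style come from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class AddPeerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Cluster key in the same hbase+rpc:// style as the AddPeerProcedure entry above;
          // the host and port here are placeholders.
          ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
              .setClusterKey("hbase+rpc://peer-master.example.org:16010")
              .setReplicateAllUserTables(true)
              .build();
          // The third argument enables the peer immediately, matching the ENABLED peer logged above.
          admin.addReplicationPeer("1", peer, true);
        }
      }
    }

Server-side, a call like this fans out into exactly the procedure chain recorded above: the master runs AddPeerProcedure, which schedules one RefreshPeerProcedure per region server so each can start its ReplicationSource.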
2024-11-21T00:29:03,432 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:306) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:03,433 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:03,433 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
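
The ClusterIdFetcher / ConnectionRegistryService exchanges that follow are what an HBase client does when it bootstraps off the masters' RPC endpoints instead of ZooKeeper (note the RpcConnectionRegistryURIFactory entry earlier, with rpc bootstrap servers). Below is a minimal sketch of opening such a connection from a connection URI, assuming the URI-based ConnectionFactory overload available in this 3.0.0-beta line; the class name, host and port are placeholders and this is not taken from the test.

    import java.net.URI;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcRegistryConnectSketch {
      public static void main(String[] args) throws Exception {
        // An hbase+rpc:// URI selects the RPC-based connection registry, so the cluster id and
        // meta location are fetched from the listed bootstrap server(s), as the log entries show.
        URI clusterUri = new URI("hbase+rpc://master.example.org:16010");
        try (Connection conn = ConnectionFactory.createConnection(clusterUri);
             Admin admin = conn.getAdmin()) {
          // The value printed here corresponds to the clusterId returned in the
          // "Response connection registry" entries above.
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }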
2024-11-21T00:29:03,433 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:03,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c886e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,437 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,40563,-1 for getting cluster id 2024-11-21T00:29:03,437 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:03,438 DEBUG [HMaster-EventLoopGroup-45-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2769c2a5-6298-47aa-8583-d1e5652dbcb6' 2024-11-21T00:29:03,438 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:03,438 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2769c2a5-6298-47aa-8583-d1e5652dbcb6" 2024-11-21T00:29:03,439 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a3fc02c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,439 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,40563,-1] 2024-11-21T00:29:03,439 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:03,439 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:03,440 INFO [HMaster-EventLoopGroup-45-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46588, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:03,441 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ff867b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,457 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@efd61b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,457 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37585,-1 for getting cluster id 2024-11-21T00:29:03,457 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:03,461 DEBUG [HMaster-EventLoopGroup-47-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3f68a05-0b79-44d8-b908-8e41661cca8a' 2024-11-21T00:29:03,461 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type 
GetConnectionRegistryResponse 2024-11-21T00:29:03,461 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3f68a05-0b79-44d8-b908-8e41661cca8a" 2024-11-21T00:29:03,461 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11ef9be6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,461 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37585,-1] 2024-11-21T00:29:03,462 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:03,462 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:03,463 INFO [HMaster-EventLoopGroup-47-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40334, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:03,472 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67ade804, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b49a038, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,477 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37889,-1 for getting cluster id 2024-11-21T00:29:03,478 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:03,478 DEBUG [HMaster-EventLoopGroup-49-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '23bbf42b-2379-405b-a2dd-f46ca66079c9' 2024-11-21T00:29:03,479 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:03,479 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "23bbf42b-2379-405b-a2dd-f46ca66079c9" 2024-11-21T00:29:03,479 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c17e4db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,479 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37889,-1] 2024-11-21T00:29:03,479 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:03,479 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:03,481 INFO 
[HMaster-EventLoopGroup-49-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46096, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:03,482 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35fe78f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:03,483 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:29:03,501 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_0 is 36, key is mmmm/f:row/1732148943500/Put/seqid=0 2024-11-21T00:29:03,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741840_1016 (size=7894) 2024-11-21T00:29:03,664 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:29:03,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,671 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,672 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,673 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,796 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,802 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 379, reset compression=false 2024-11-21T00:29:03,816 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:03,959 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_1 is 34, key is ppp/f:row/1732148943959/Put/seqid=0 2024-11-21T00:29:04,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741841_1017 (size=7691) 2024-11-21T00:29:04,348 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 379, reset compression=false 2024-11-21T00:29:04,416 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60cb7923, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:04,417 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,40563,-1 for getting cluster id 2024-11-21T00:29:04,417 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:04,424 DEBUG [HMaster-EventLoopGroup-45-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2769c2a5-6298-47aa-8583-d1e5652dbcb6' 2024-11-21T00:29:04,428 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:04,428 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2769c2a5-6298-47aa-8583-d1e5652dbcb6" 2024-11-21T00:29:04,429 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@346c6d94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:04,429 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,40563,-1] 2024-11-21T00:29:04,429 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:04,429 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:04,430 INFO [HMaster-EventLoopGroup-45-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46604, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:04,431 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1be6b6e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:04,431 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:29:04,432 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38737,1732148925192, seqNum=-1] 2024-11-21T00:29:04,432 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:04,433 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-46-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57164, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:04,449 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:04,450 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,40563,1732148924943 2024-11-21T00:29:04,451 DEBUG [Time-limited test {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@748e94b5 2024-11-21T00:29:04,451 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:04,452 INFO [HMaster-EventLoopGroup-45-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46618, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:04,456 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148938288.0c91503478896a32e34433cb639122e5., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2] 2024-11-21T00:29:04,459 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:29:04,480 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_0 
first=Optional[mmmm] last=Optional[oooo] 2024-11-21T00:29:04,492 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_1 first=Optional[ppp] last=Optional[rrr] 2024-11-21T00:29:04,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_0 for inclusion in 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:04,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(626): HFile bounds: first=mmmm last=oooo 2024-11-21T00:29:04,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:04,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_1 for inclusion in 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:04,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(626): HFile bounds: first=ppp last=rrr 2024-11-21T00:29:04,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:04,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HRegion(2603): Flush status journal for 0c91503478896a32e34433cb639122e5: 2024-11-21T00:29:04,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_0 to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_0 2024-11-21T00:29:04,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_0 as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ 2024-11-21T00:29:04,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_0/f/hfile_1 to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_1 2024-11-21T00:29:04,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_1 as 
hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ 2024-11-21T00:29:04,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_0 into 0c91503478896a32e34433cb639122e5/f as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ - updating store file list. 2024-11-21T00:29:04,549 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:replication' 2024-11-21T00:29:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStoreFile(483): HFile Bloom filter type for f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:04,552 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ into 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:04,552 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_0 into 0c91503478896a32e34433cb639122e5/f (new location: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_) 2024-11-21T00:29:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_0 2024-11-21T00:29:04,553 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_1 into 0c91503478896a32e34433cb639122e5/f as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ - updating store file list. 
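
The BulkLoadHFilesTool and SecureBulkLoadManager entries above correspond to a client-driven bulk load of the two staged HFiles (hfile_0, hfile_1) into table 'test': the store validates each file against the region boundaries, moves it into the staging directory, and commits it into the store. A minimal sketch of triggering the same kind of load through the public BulkLoadHFiles API follows; the HDFS path and class name are placeholders, and the directory is assumed to use the tool's expected <dir>/<family>/<hfile> layout (family 'f', as in the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Directory containing one sub-directory per column family, e.g. .../f/hfile_0, .../f/hfile_1
        Path hfileDir = new Path("hdfs://namenode.example.org:8020/tmp/testHFileCyclicReplication_0");
        // Validates each HFile against region boundaries and moves it into the store, which is
        // what the server-side "Validating hfile ... for inclusion" and "Committing ..." lines show.
        BulkLoadHFiles.create(conf).bulkLoad(TableName.valueOf("test"), hfileDir);
      }
    }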
2024-11-21T00:29:04,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStoreFile(483): HFile Bloom filter type for bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:04,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ into 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:04,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_1 into 0c91503478896a32e34433cb639122e5/f (new location: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_) 2024-11-21T00:29:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__j19kc3rnhslnd47dmq12mp8ojm8on53k5dpqqocfgg1e2gdgjb7ll4jdlglb7rg0/f/hfile_1 2024-11-21T00:29:04,566 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:29:04,567 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.replication.TestMasterReplication.loadAndValidateHFileReplication(TestMasterReplication.java:720) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:320) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:04,567 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:04,567 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:04,567 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:29:04,567 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncRegionLocatorHelper(64): Try updating region=test,,1732148938288.0c91503478896a32e34433cb639122e5., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2 , the old value is region=test,,1732148938288.0c91503478896a32e34433cb639122e5., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=5ed4808ef0e6:38737 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:29:04,567 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=test,,1732148938288.0c91503478896a32e34433cb639122e5., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-21T00:29:04,567 DEBUG [RPCClient-NioEventLoopGroup-4-7 {}] client.AsyncRegionLocatorHelper(88): Try removing region=test,,1732148938288.0c91503478896a32e34433cb639122e5., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2 from cache 2024-11-21T00:29:04,567 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:29:04,568 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,40747,1732148929411, seqNum=-1] 2024-11-21T00:29:04,569 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:04,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-48-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:04,572 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-46-1 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 
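
Right after the load, the test waits for the bulk-loaded rows to show up on the peer cluster ("Waiting more time for bulkloaded data replication." in the entries below). A polling check of that kind, written against the standard client API, might look as follows; this is a sketch, not the test's code: the peer configuration source, deadline, and class name are assumptions, while the table name 'test' and the row key 'mmmm' (the first key of hfile_0 above) come from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AwaitReplicationSketch {
      public static void main(String[] args) throws Exception {
        // Assumed to be configured to point at the peer (destination) cluster.
        Configuration peerConf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(peerConf);
             Table table = conn.getTable(TableName.valueOf("test"))) {
          long deadline = System.currentTimeMillis() + 60_000L;
          while (System.currentTimeMillis() < deadline) {
            // 'mmmm' is one of the bulk-loaded keys; any replicated row would do for the check.
            Result r = table.get(new Get(Bytes.toBytes("mmmm")));
            if (!r.isEmpty()) {
              System.out.println("bulk-loaded row replicated to peer");
              return;
            }
            Thread.sleep(1_000L);
          }
          throw new AssertionError("bulk-loaded data did not replicate within the deadline");
        }
      }
    }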
2024-11-21T00:29:04,573 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148938920.e4156432cf81317006fd52a2ffca50a1., hostname=5ed4808ef0e6,40747,1732148929411, seqNum=2] 2024-11-21T00:29:04,574 INFO [Time-limited test {}] replication.TestMasterReplication(739): Waiting more time for bulkloaded data replication. 2024-11-21T00:29:04,677 WARN [Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'test', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:38737 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-11-21T00:29:04,968 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 379, reset compression=false 2024-11-21T00:29:04,992 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:29:04,992 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38737,1732148925192 got entry batch from reader: WALEntryBatch [walEntries=[{test/0c91503478896a32e34433cb639122e5/5=[#edits: 1 = <\x00/METAFAMILY:HBASE::BULK_LOAD/1732148944565/Put/vlen=190/seqid=0; >],8098}], lastWalPath=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438, lastWalPosition=687, nbRowKeys=1, nbHFiles=2, heapSize=8098, lastSeqIds={}, endOfFile=false,usedBufferSize=407] 2024-11-21T00:29:04,995 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:29:04,996 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-48-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57260, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=AdminService 2024-11-21T00:29:04,997 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.ReplicationSink(318): Replicating [2769c2a5-6298-47aa-8583-d1e5652dbcb6] bulk loaded data 2024-11-21T00:29:05,004 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@344b8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,004 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37585,-1 for getting cluster id 2024-11-21T00:29:05,004 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:05,008 DEBUG [HMaster-EventLoopGroup-47-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd3f68a05-0b79-44d8-b908-8e41661cca8a' 2024-11-21T00:29:05,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:05,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d3f68a05-0b79-44d8-b908-8e41661cca8a" 2024-11-21T00:29:05,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@180dcad4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37585,-1] 2024-11-21T00:29:05,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:05,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:05,010 INFO [HMaster-EventLoopGroup-47-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40340, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.22 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:05,011 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@45cf4d66, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741837_1013 (size=7894) 2024-11-21T00:29:05,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741838_1014 (size=7691) 2024-11-21T00:29:05,152 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:05,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37585,1732148929254 2024-11-21T00:29:05,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7d3e501b 2024-11-21T00:29:05,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(159): 
Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:05,157 INFO [HMaster-EventLoopGroup-47-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40356, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.22 (auth:SIMPLE), service=MasterService 2024-11-21T00:29:05,159 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:29:05,159 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:29:05,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,40747,1732148929411, seqNum=-1] 2024-11-21T00:29:05,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:05,163 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-48-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57264, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.22 (auth:SIMPLE), service=ClientService 2024-11-21T00:29:05,192 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 687, reset compression=false 2024-11-21T00:29:05,194 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ first=Optional[ppp] last=Optional[rrr] 2024-11-21T00:29:05,208 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ first=Optional[mmmm] last=Optional[oooo] 2024-11-21T00:29:05,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ for inclusion in e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:05,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(626): HFile bounds: first=mmmm last=oooo 2024-11-21T00:29:05,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:05,255 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(614): Validating hfile at 
hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ for inclusion in e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:05,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(626): HFile bounds: first=ppp last=rrr 2024-11-21T00:29:05,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:05,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HRegion(2603): Flush status journal for e4156432cf81317006fd52a2ffca50a1: 2024-11-21T00:29:05,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:29:05,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/33c71143e894438dafd6480fb7b00080_SeqId_4_ 2024-11-21T00:29:05,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:29:05,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5552dacf6648432d881678f32cf88f8c_SeqId_4_ 2024-11-21T00:29:05,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ into e4156432cf81317006fd52a2ffca50a1/f as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/33c71143e894438dafd6480fb7b00080_SeqId_4_ - updating store file list. 
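The SecureBulkLoadManager / HStore entries above are the replication sink on the peer cluster validating and committing bulk-loaded HFiles that were shipped over from the source. For reference, a minimal sketch of the configuration that turns on HFile (bulk load) replication; the property keys are the standard HBase ones, while the cluster-id value below is purely hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableBulkLoadReplicationSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Must be enabled on both the source and the peer cluster for HFiles to be replicated.
        conf.setBoolean("hbase.replication.bulkload.enabled", true);
        // Each cluster needs a unique id so bulk-load entries can be attributed; value is hypothetical.
        conf.set("hbase.replication.cluster.id", "source-cluster-1");
      }
    }
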
2024-11-21T00:29:05,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 33c71143e894438dafd6480fb7b00080_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:05,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/33c71143e894438dafd6480fb7b00080_SeqId_4_ into e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:05,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ into e4156432cf81317006fd52a2ffca50a1/f (new location: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/33c71143e894438dafd6480fb7b00080_SeqId_4_) 2024-11-21T00:29:05,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/f3e48d7d54d84fc780c39b76de04b52d_SeqId_4_ 2024-11-21T00:29:05,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ into e4156432cf81317006fd52a2ffca50a1/f as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5552dacf6648432d881678f32cf88f8c_SeqId_4_ - updating store file list. 
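The "HFile Bloom filter type ...: NONE, but ROW specified in column family configuration" messages above arise when HFiles written without a Bloom filter are loaded into a family that was declared with BloomType.ROW. A minimal sketch of how such a family is declared, assuming an already-open Admin handle (the class and method names are illustrative only):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowBloomFamilySketch {
      static void createTestTable(Admin admin) throws IOException {
        admin.createTable(
            TableDescriptorBuilder.newBuilder(TableName.valueOf("test"))
                .setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
                        // ROW here is what the "ROW specified in column family configuration" note refers to.
                        .setBloomFilterType(BloomType.ROW)
                        .build())
                .build());
      }
    }
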
2024-11-21T00:29:05,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5552dacf6648432d881678f32cf88f8c_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:05,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5552dacf6648432d881678f32cf88f8c_SeqId_4_ into e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:05,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ into e4156432cf81317006fd52a2ffca50a1/f (new location: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5552dacf6648432d881678f32cf88f8c_SeqId_4_) 2024-11-21T00:29:05,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__5tuv33i8704qmn1nrme7bc7mkfpe0sqhgv627kt2384oul8qa7dgmdjslje2g8ga/f/bba1eda7f3ec42e5bcd425fe6e44b8b7_SeqId_4_ 2024-11-21T00:29:05,279 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.ReplicationSink(324): Finished replicating [2769c2a5-6298-47aa-8583-d1e5652dbcb6] bulk loaded data 2024-11-21T00:29:05,521 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 687, reset compression=false 2024-11-21T00:29:05,584 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:29:05,586 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35845,1732148935107, seqNum=-1] 2024-11-21T00:29:05,586 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:05,588 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41298, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:05,590 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148940081.31faf65a546f672b94789a29bee40c63., hostname=5ed4808ef0e6,35845,1732148935107, seqNum=2] 2024-11-21T00:29:05,602 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:29:05,604 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-46-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57168, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-21T00:29:05,609 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL AsyncFSWAL 5ed4808ef0e6%2C38737%2C1732148925192.meta:.meta(num 1732148926946) roll requested 2024-11-21T00:29:05,634 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.meta.1732148945609.meta, exclude list is [], retry=0 2024-11-21T00:29:05,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-823beef4-9d61-40e1-b761-72a26cd5c543,DISK] 2024-11-21T00:29:05,650 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.meta.1732148926946.meta with entries=13, filesize=3.38 KB; new WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.meta.1732148945609.meta 2024-11-21T00:29:05,652 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43193:43193)] 2024-11-21T00:29:05,653 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.meta.1732148926946.meta is not closed yet, will try archiving it next time 2024-11-21T00:29:05,653 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL AsyncFSWAL 5ed4808ef0e6%2C38737%2C1732148925192.rep:(num 1732148942254) roll requested 2024-11-21T00:29:05,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741834_1010 (size=3473) 2024-11-21T00:29:05,687 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.rep.1732148945653, exclude list is [], retry=0 2024-11-21T00:29:05,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-823beef4-9d61-40e1-b761-72a26cd5c543,DISK] 2024-11-21T00:29:05,709 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.rep.1732148942254 with entries=6, filesize=1.46 KB; new WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.rep.1732148945653 2024-11-21T00:29:05,724 DEBUG [regionserver/5ed4808ef0e6:0.logRoller 
{}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43193:43193)] 2024-11-21T00:29:05,725 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.rep.1732148942254 is not closed yet, will try archiving it next time 2024-11-21T00:29:05,725 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL AsyncFSWAL 5ed4808ef0e6%2C38737%2C1732148925192:(num 1732148926438) roll requested 2024-11-21T00:29:05,743 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725, exclude list is [], retry=0 2024-11-21T00:29:05,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741839_1015 (size=1505) 2024-11-21T00:29:05,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-46-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40739,DS-823beef4-9d61-40e1-b761-72a26cd5c543,DISK] 2024-11-21T00:29:05,775 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 with entries=2, filesize=687 B; new WAL /user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 2024-11-21T00:29:05,784 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:43193:43193)] 2024-11-21T00:29:05,784 DEBUG [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 is not closed yet, will try archiving it next time 2024-11-21T00:29:05,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741832_1008 (size=695) 2024-11-21T00:29:05,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d4d29b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,801 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,40563,-1 for getting cluster id 2024-11-21T00:29:05,801 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:05,802 DEBUG [HMaster-EventLoopGroup-45-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2769c2a5-6298-47aa-8583-d1e5652dbcb6' 2024-11-21T00:29:05,802 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 
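The logRoller entries above show the region server rolling its meta, rep and default WAL groups and archiving the previous files. In the test this roll is driven internally, but the same roll can be requested from a client; a minimal sketch using the server name that appears in the log, assuming the connection settings come from the local hbase-site.xml:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Ask the region server seen in the log above to roll its WAL writers.
          admin.rollWALWriter(ServerName.valueOf("5ed4808ef0e6,38737,1732148925192"));
        }
      }
    }
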
2024-11-21T00:29:05,802 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2769c2a5-6298-47aa-8583-d1e5652dbcb6" 2024-11-21T00:29:05,802 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45079a89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,802 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,40563,-1] 2024-11-21T00:29:05,803 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:05,803 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:05,804 INFO [HMaster-EventLoopGroup-45-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46634, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:05,809 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ad82254, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,809 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:05,810 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,40563,1732148924943 2024-11-21T00:29:05,810 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1b214e4e 2024-11-21T00:29:05,811 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:05,812 INFO [HMaster-EventLoopGroup-45-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46646, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:05,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.HMaster(3973): Client=jenkins//172.17.0.2 creating replication peer, id=2, config=clusterKey=hbase+rpc://5ed4808ef0e6:37889,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false, state=ENABLED 2024-11-21T00:29:05,815 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/oldWALs/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 2024-11-21T00:29:05,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:PRE_PEER_MODIFICATION, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure 2024-11-21T00:29:05,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=12 2024-11-21T00:29:05,819 DEBUG [PEWorker-2 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:37889' 2024-11-21T00:29:05,820 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@618d23d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,820 DEBUG [PEWorker-2 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37889,-1 for getting cluster id 2024-11-21T00:29:05,820 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:05,825 DEBUG [HMaster-EventLoopGroup-49-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '23bbf42b-2379-405b-a2dd-f46ca66079c9' 2024-11-21T00:29:05,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:05,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "23bbf42b-2379-405b-a2dd-f46ca66079c9" 2024-11-21T00:29:05,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a58ec4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37889,-1] 2024-11-21T00:29:05,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:05,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:05,828 INFO [HMaster-EventLoopGroup-49-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46122, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:05,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6093e1a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:05,830 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:05,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37889,1732148934945 2024-11-21T00:29:05,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@18df84f6 2024-11-21T00:29:05,832 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:05,833 INFO [HMaster-EventLoopGroup-49-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46136, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:05,834 INFO [PEWorker-2 {}] client.AsyncConnectionImpl(233): Connection has been closed by PEWorker-2. 2024-11-21T00:29:05,834 DEBUG [PEWorker-2 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkClusterKey(ReplicationPeerManager.java:438) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.checkPeerConfig(ReplicationPeerManager.java:475) at org.apache.hadoop.hbase.master.replication.ReplicationPeerManager.preAddPeer(ReplicationPeerManager.java:172) at org.apache.hadoop.hbase.master.replication.AddPeerProcedure.prePeerModification(AddPeerProcedure.java:118) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:188) at org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure.executeFromState(ModifyPeerProcedure.java:45) at org.apache.hadoop.hbase.procedure2.StateMachineProcedure.execute(StateMachineProcedure.java:188) at org.apache.hadoop.hbase.procedure2.Procedure.doExecute(Procedure.java:941) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.execProcedure(ProcedureExecutor.java:1825) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor.executeProcedure(ProcedureExecutor.java:1503) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.runProcedure(ProcedureExecutor.java:2154) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.procedure2.ProcedureExecutor$WorkerThread.run(ProcedureExecutor.java:2181) 2024-11-21T00:29:05,834 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:05,834 DEBUG [PEWorker-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:05,835 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
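The AddPeerProcedure above (pid=12) is the master-side half of an Admin.addReplicationPeer call: checkClusterKey opens a short-lived connection to the peer's bootstrap server to validate clusterKey=hbase+rpc://5ed4808ef0e6:37889 and then closes it, which is what the recorded call stack shows. A minimal client-side sketch of the same request, assuming an open Admin handle on the source cluster:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class AddPeerSketch {
      static void addPeer(Admin admin) throws IOException {
        admin.addReplicationPeer("2",
            ReplicationPeerConfig.newBuilder()
                // URI-style cluster key taken from the log entry above.
                .setClusterKey("hbase+rpc://5ed4808ef0e6:37889")
                .setReplicateAllUserTables(true)
                .build());
      }
    }
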
2024-11-21T00:29:05,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure}] 2024-11-21T00:29:05,925 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 to pos 687, reset compression=false 2024-11-21T00:29:05,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T00:29:05,932 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] wal.AbstractFSWALProvider(535): Log hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 was moved to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/oldWALs/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 2024-11-21T00:29:05,935 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(456): EOF, closing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438 2024-11-21T00:29:05,935 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38737,1732148925192 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148926438, lastWalPosition=-1, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=true,usedBufferSize=0] 2024-11-21T00:29:05,936 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725, startPosition=0, beingWritten=true 2024-11-21T00:29:05,941 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceManager(720): Removing 1 logs in the list: [5ed4808ef0e6%2C38737%2C1732148925192.1732148926438] 2024-11-21T00:29:06,027 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38737 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable, pid=13 2024-11-21T00:29:06,028 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.RefreshPeerCallable(47): Received a peer change event, peerId=2, type=ADD_PEER 2024-11-21T00:29:06,116 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.ReplicationSource(231): queueId=2-5ed4808ef0e6,38737,1732148925192, ReplicationSource: 2, currentBandwidth=0 2024-11-21T00:29:06,137 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0 {event_type=RS_REFRESH_PEER, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-21T00:29:06,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-21T00:29:06,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T00:29:06,143 INFO [PEWorker-1 {}] replication.RefreshPeerProcedure(132): Refresh peer 2 for ADD on 5ed4808ef0e6,38737,1732148925192 suceeded 2024-11-21T00:29:06,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-21T00:29:06,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.RefreshPeerProcedure in 270 msec 2024-11-21T00:29:06,157 INFO [PEWorker-3 {}] replication.AddPeerProcedure(130): Successfully added ENABLED peer 2, config clusterKey=hbase+rpc://5ed4808ef0e6:37889,replicationEndpointImpl=null,replicateAllUserTables=true,bandwidth=0,serial=false 2024-11-21T00:29:06,163 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.replication.AddPeerProcedure in 345 msec 2024-11-21T00:29:06,193 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] client.RpcConnectionRegistryURIFactory(40): connect to hbase cluster with rpc bootstrap servers='5ed4808ef0e6:37889' 2024-11-21T00:29:06,204 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@1426bde7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:06,205 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37889,-1 for getting cluster id 2024-11-21T00:29:06,205 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:06,206 DEBUG [HMaster-EventLoopGroup-49-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '23bbf42b-2379-405b-a2dd-f46ca66079c9' 2024-11-21T00:29:06,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(424): process preamble call 
response with response type GetConnectionRegistryResponse 2024-11-21T00:29:06,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "23bbf42b-2379-405b-a2dd-f46ca66079c9" 2024-11-21T00:29:06,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@5aee5f1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:06,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37889,-1] 2024-11-21T00:29:06,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:06,207 INFO [HMaster-EventLoopGroup-49-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46154, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:06,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:06,208 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@4649889e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:06,208 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:06,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37889,1732148934945 2024-11-21T00:29:06,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@72a8b37a 2024-11-21T00:29:06,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:06,210 INFO [HMaster-EventLoopGroup-49-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46158, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=MasterService 2024-11-21T00:29:06,211 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSource(613): peerId=2, queueId=2-5ed4808ef0e6,38737,1732148925192 (queues=1) is replicating from cluster=2769c2a5-6298-47aa-8583-d1e5652dbcb6 to cluster=23bbf42b-2379-405b-a2dd-f46ca66079c9 2024-11-21T00:29:06,211 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSource(382): peerId=2, starting shipping worker for walGroupId=5ed4808ef0e6%2C38737%2C1732148925192 2024-11-21T00:29:06,211 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192 {}] 
regionserver.ReplicationSourceWALReader(111): peerClusterZnode=2-5ed4808ef0e6,38737,1732148925192, ReplicationSourceWALReaderThread : 2 inited, replicationBatchSizeCapacity=1024, replicationBatchCountCapacity=25000, replicationBatchQueueCapacity=1 2024-11-21T00:29:06,222 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(254): Creating new reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725, startPosition=0, beingWritten=true 2024-11-21T00:29:06,236 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(98): Running ReplicationSourceShipper Thread for wal group: 5ed4808ef0e6%2C38737%2C1732148925192 2024-11-21T00:29:06,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40563 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-21T00:29:06,450 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:29:06,450 INFO [RPCClient-NioEventLoopGroup-4-14 {}] client.RawAsyncHBaseAdmin$ReplicationProcedureBiConsumer(3004): Operation: ADD_REPLICATION_PEER, peerId: 2 completed 2024-11-21T00:29:06,450 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.replication.TestMasterReplication.addPeer(TestMasterReplication.java:620) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:329) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:06,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:06,451 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:29:06,451 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:06,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-21T00:29:06,464 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 0, reset compression=false 2024-11-21T00:29:06,465 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 0, reset compression=false 2024-11-21T00:29:06,506 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_0 is 36, key is ssss/f:row/1732148946505/Put/seqid=0 2024-11-21T00:29:06,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741845_1021 (size=11194) 2024-11-21T00:29:06,889 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 0, reset compression=false 2024-11-21T00:29:07,016 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_1 is 34, key is vvv/f:row/1732148947015/Put/seqid=0 2024-11-21T00:29:07,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741846_1022 (size=10791) 2024-11-21T00:29:07,059 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e36fe40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:07,060 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,40563,-1 for getting cluster id 2024-11-21T00:29:07,061 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:07,062 DEBUG [HMaster-EventLoopGroup-45-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2769c2a5-6298-47aa-8583-d1e5652dbcb6' 2024-11-21T00:29:07,062 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:07,062 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2769c2a5-6298-47aa-8583-d1e5652dbcb6" 2024-11-21T00:29:07,062 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12f9afdd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:07,063 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,40563,-1] 2024-11-21T00:29:07,063 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:07,063 DEBUG [RPCClient-NioEventLoopGroup-4-15 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:07,065 INFO [HMaster-EventLoopGroup-45-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46674, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:07,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21d989f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:07,066 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:29:07,068 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,38737,1732148925192, seqNum=-1] 2024-11-21T00:29:07,068 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:07,069 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-46-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57176, version=3.0.0-beta-2-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-21T00:29:07,091 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:07,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,40563,1732148924943 2024-11-21T00:29:07,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1f4a4897 2024-11-21T00:29:07,092 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:07,093 INFO [HMaster-EventLoopGroup-45-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46678, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-21T00:29:07,097 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'test', row='', locateType=CURRENT is [region=test,,1732148938288.0c91503478896a32e34433cb639122e5., hostname=5ed4808ef0e6,38737,1732148925192, seqNum=2] 2024-11-21T00:29:07,109 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:29:07,135 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 0, reset compression=false 2024-11-21T00:29:07,143 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_0 first=Optional[ssss] last=Optional[uuuu] 2024-11-21T00:29:07,160 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_1 first=Optional[vvv] last=Optional[xxx] 2024-11-21T00:29:07,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_0 for inclusion in 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:07,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(626): HFile bounds: first=ssss last=uuuu 2024-11-21T00:29:07,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:07,229 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_1 for inclusion in 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:07,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(626): HFile bounds: first=vvv last=xxx 2024-11-21T00:29:07,234 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:07,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HRegion(2603): Flush status journal for 0c91503478896a32e34433cb639122e5: 2024-11-21T00:29:07,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_0 to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_0 2024-11-21T00:29:07,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_0 as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ 2024-11-21T00:29:07,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/testHFileCyclicReplication_1/f/hfile_1 to hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_1 2024-11-21T00:29:07,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_1 as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ 2024-11-21T00:29:07,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_0 into 0c91503478896a32e34433cb639122e5/f as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ - updating store file list. 
2024-11-21T00:29:07,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 49541f8648ee422cba0743c67bf248d8_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:07,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ into 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:07,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_0 into 0c91503478896a32e34433cb639122e5/f (new location: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_) 2024-11-21T00:29:07,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_0 2024-11-21T00:29:07,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_1 into 0c91503478896a32e34433cb639122e5/f as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ - updating store file list. 
2024-11-21T00:29:07,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5cc60779937644b3a59296a71ac8776d_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:07,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ into 0c91503478896a32e34433cb639122e5/f 2024-11-21T00:29:07,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_1 into 0c91503478896a32e34433cb639122e5/f (new location: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_) 2024-11-21T00:29:07,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38737 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/staging/jenkins__test__75qu470fiu62ei2d2lp4smcg9svldmsale40k656642ra4tvpmeed26coqlhafca/f/hfile_1 2024-11-21T00:29:07,305 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 0, reset compression=false 2024-11-21T00:29:07,309 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:29:07,309 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.replication.TestMasterReplication.loadAndValidateHFileReplication(TestMasterReplication.java:720) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:340) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:07,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:07,310 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:07,310 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
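The call stack above shows the test driving the load through BulkLoadHFilesTool.bulkLoad(...). A hedged sketch of the equivalent client-side call through the public BulkLoadHFiles interface; the namenode address, input directory, and table name below are placeholders, not values from this run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The input directory is expected to be laid out as <dir>/<family>/<hfile>,
    // e.g. .../testHFileCyclicReplication_1/f/hfile_0 as in the log above.
    Path hfileDir = new Path("hdfs://namenode:8020/staging/bulkload-input");
    BulkLoadHFiles loader = BulkLoadHFiles.create(conf);
    // Validates each HFile against the region boundaries, moves it into the
    // secure staging area, and commits it into the target store.
    loader.bulkLoad(TableName.valueOf("test"), hfileDir);
  }
}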
2024-11-21T00:29:07,313 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:29:07,320 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 2-5ed4808ef0e6,38737,1732148925192 got entry batch from reader: WALEntryBatch [walEntries=[{test/0c91503478896a32e34433cb639122e5/7=[#edits: 1 = <\x00/METAFAMILY:HBASE::BULK_LOAD/1732148947296/Put/vlen=190/seqid=0; >],11198}], lastWalPath=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725, lastWalPosition=393, nbRowKeys=1, nbHFiles=2, heapSize=11198, lastSeqIds={}, endOfFile=false,usedBufferSize=407] 2024-11-21T00:29:07,323 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-21T00:29:07,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-50-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41304, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.21 (auth:SIMPLE), service=AdminService 2024-11-21T00:29:07,325 INFO [Time-limited test {}] replication.TestMasterReplication(739): Waiting more time for bulkloaded data replication. 
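After the "Waiting more time for bulkloaded data replication." message, the test keeps checking the peer cluster until the bulk-loaded rows become visible. A rough, hypothetical version of such a check is sketched below; it assumes peerConf points at the slave cluster and simply counts rows in family "f":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CountReplicatedRows {
  public static void main(String[] args) throws Exception {
    // Placeholder: in a real check this configuration would carry the slave
    // cluster's ZooKeeper quorum rather than the local defaults.
    Configuration peerConf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(peerConf);
         Table table = conn.getTable(TableName.valueOf("test"));
         ResultScanner scanner = table.getScanner(new Scan().addFamily(Bytes.toBytes("f")))) {
      int rows = 0;
      for (Result ignored : scanner) {
        rows++;  // each Result is one row that arrived via the replicated bulk load
      }
      System.out.println("rows visible on peer: " + rows);
    }
  }
}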
2024-11-21T00:29:07,327 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.ReplicationSink(318): Replicating [2769c2a5-6298-47aa-8583-d1e5652dbcb6] bulk loaded data 2024-11-21T00:29:07,337 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@3a224c18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:07,337 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] client.ClusterIdFetcher(90): Going to request 5ed4808ef0e6,37889,-1 for getting cluster id 2024-11-21T00:29:07,338 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-21T00:29:07,338 DEBUG [HMaster-EventLoopGroup-49-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '23bbf42b-2379-405b-a2dd-f46ca66079c9' 2024-11-21T00:29:07,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-21T00:29:07,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "23bbf42b-2379-405b-a2dd-f46ca66079c9" 2024-11-21T00:29:07,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@28167680, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:07,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [5ed4808ef0e6,37889,-1] 2024-11-21T00:29:07,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-21T00:29:07,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:07,341 INFO [HMaster-EventLoopGroup-49-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46178, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.23 (auth:SIMPLE), service=ClientMetaService 2024-11-21T00:29:07,342 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags@72e7639f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-21T00:29:07,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741837_1013 (size=11194) 2024-11-21T00:29:07,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741838_1014 (size=10791) 2024-11-21T00:29:07,516 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:07,523 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-21T00:29:07,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 5ed4808ef0e6,37889,1732148934945 2024-11-21T00:29:07,524 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1a2d671f 2024-11-21T00:29:07,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-21T00:29:07,527 INFO [HMaster-EventLoopGroup-49-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46184, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.23 (auth:SIMPLE), service=MasterService 2024-11-21T00:29:07,530 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:29:07,530 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-21T00:29:07,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=5ed4808ef0e6,35845,1732148935107, seqNum=-1] 2024-11-21T00:29:07,531 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-21T00:29:07,532 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-50-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41310, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.23 (auth:SIMPLE), service=ClientService 2024-11-21T00:29:07,562 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ first=Optional[ssss] last=Optional[uuuu] 2024-11-21T00:29:07,575 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ first=Optional[vvv] last=Optional[xxx] 2024-11-21T00:29:07,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(614): Validating hfile at 
hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ for inclusion in 31faf65a546f672b94789a29bee40c63/f 2024-11-21T00:29:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(626): HFile bounds: first=ssss last=uuuu 2024-11-21T00:29:07,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:07,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ for inclusion in 31faf65a546f672b94789a29bee40c63/f 2024-11-21T00:29:07,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(626): HFile bounds: first=vvv last=xxx 2024-11-21T00:29:07,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:07,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HRegion(2603): Flush status journal for 31faf65a546f672b94789a29bee40c63: 2024-11-21T00:29:07,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 49541f8648ee422cba0743c67bf248d8_SeqId_6_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:29:07,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/d8403e9eabdd47bd91c51a2093f02a86_SeqId_4_ 2024-11-21T00:29:07,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 5cc60779937644b3a59296a71ac8776d_SeqId_6_ is already available in staging directory. Skipping copy or rename. 
2024-11-21T00:29:07,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/f8035c5c54564e30a1d57290c26ef374_SeqId_4_ 2024-11-21T00:29:07,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ into 31faf65a546f672b94789a29bee40c63/f as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/d8403e9eabdd47bd91c51a2093f02a86_SeqId_4_ - updating store file list. 2024-11-21T00:29:07,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStoreFile(483): HFile Bloom filter type for d8403e9eabdd47bd91c51a2093f02a86_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:07,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/d8403e9eabdd47bd91c51a2093f02a86_SeqId_4_ into 31faf65a546f672b94789a29bee40c63/f 2024-11-21T00:29:07,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ into 31faf65a546f672b94789a29bee40c63/f (new location: hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/d8403e9eabdd47bd91c51a2093f02a86_SeqId_4_) 2024-11-21T00:29:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ 2024-11-21T00:29:07,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ into 31faf65a546f672b94789a29bee40c63/f as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/f8035c5c54564e30a1d57290c26ef374_SeqId_4_ - updating store file list. 
2024-11-21T00:29:07,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStoreFile(483): HFile Bloom filter type for f8035c5c54564e30a1d57290c26ef374_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:07,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/f8035c5c54564e30a1d57290c26ef374_SeqId_4_ into 31faf65a546f672b94789a29bee40c63/f 2024-11-21T00:29:07,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ into 31faf65a546f672b94789a29bee40c63/f (new location: hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/f/f8035c5c54564e30a1d57290c26ef374_SeqId_4_) 2024-11-21T00:29:07,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/staging/jenkins.hfs.23__test__ao5qbaeqa0tu1f1l6468lg40cfqlbrn5bgr7bfvsblo2sokt9i17vej3eq6k6bre/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ 2024-11-21T00:29:07,647 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35845 {}] regionserver.ReplicationSink(324): Finished replicating [2769c2a5-6298-47aa-8583-d1e5652dbcb6] bulk loaded data 2024-11-21T00:29:07,847 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:07,854 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 0, reset compression=false 2024-11-21T00:29:07,901 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceWALReader(177): Read 1 WAL entries eligible for replication 2024-11-21T00:29:07,904 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38737,1732148925192 got entry batch from reader: WALEntryBatch [walEntries=[{test/0c91503478896a32e34433cb639122e5/7=[#edits: 1 = 
<\x00/METAFAMILY:HBASE::BULK_LOAD/1732148947296/Put/vlen=190/seqid=0; >],11198}], lastWalPath=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725, lastWalPosition=393, nbRowKeys=1, nbHFiles=2, heapSize=11198, lastSeqIds={}, endOfFile=false,usedBufferSize=407] 2024-11-21T00:29:07,905 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.ReplicationSink(318): Replicating [2769c2a5-6298-47aa-8583-d1e5652dbcb6] bulk loaded data 2024-11-21T00:29:07,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741839_1015 (size=11194) 2024-11-21T00:29:08,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741840_1016 (size=10791) 2024-11-21T00:29:08,047 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=test,, stopping at row=test ,, for max=2147483647 with caching=100 2024-11-21T00:29:08,076 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ first=Optional[ssss] last=Optional[uuuu] 2024-11-21T00:29:08,082 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ first=Optional[vvv] last=Optional[xxx] 2024-11-21T00:29:08,092 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ for inclusion in e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:08,104 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(626): HFile bounds: first=ssss last=uuuu 2024-11-21T00:29:08,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:08,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ for inclusion in e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:08,122 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(626): HFile bounds: first=vvv last=xxx 2024-11-21T00:29:08,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-21T00:29:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HRegion(2603): Flush status journal for e4156432cf81317006fd52a2ffca50a1: 2024-11-21T00:29:08,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 49541f8648ee422cba0743c67bf248d8_SeqId_6_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:29:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5abc437f3b9344c199353c0164f39f7d_SeqId_6_ 2024-11-21T00:29:08,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(375): 5cc60779937644b3a59296a71ac8776d_SeqId_6_ is already available in staging directory. Skipping copy or rename. 2024-11-21T00:29:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/907db15e41a341b9a893e16858fb415f_SeqId_6_ 2024-11-21T00:29:08,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ into e4156432cf81317006fd52a2ffca50a1/f as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5abc437f3b9344c199353c0164f39f7d_SeqId_6_ - updating store file list. 
2024-11-21T00:29:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 5abc437f3b9344c199353c0164f39f7d_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:08,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5abc437f3b9344c199353c0164f39f7d_SeqId_6_ into e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:08,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ into e4156432cf81317006fd52a2ffca50a1/f (new location: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/5abc437f3b9344c199353c0164f39f7d_SeqId_6_) 2024-11-21T00:29:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/49541f8648ee422cba0743c67bf248d8_SeqId_6_ 2024-11-21T00:29:08,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ into e4156432cf81317006fd52a2ffca50a1/f as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/907db15e41a341b9a893e16858fb415f_SeqId_6_ - updating store file list. 
2024-11-21T00:29:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 907db15e41a341b9a893e16858fb415f_SeqId_6_: NONE, but ROW specified in column family configuration 2024-11-21T00:29:08,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/907db15e41a341b9a893e16858fb415f_SeqId_6_ into e4156432cf81317006fd52a2ffca50a1/f 2024-11-21T00:29:08,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ into e4156432cf81317006fd52a2ffca50a1/f (new location: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/f/907db15e41a341b9a893e16858fb415f_SeqId_6_) 2024-11-21T00:29:08,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/staging/jenkins.hfs.22__test__8ukatls48qma0jqipapiqs15g92v5ag6528gckjj1aol7bbr42bcecj70fbehqca/f/5cc60779937644b3a59296a71ac8776d_SeqId_6_ 2024-11-21T00:29:08,139 DEBUG [RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=40747 {}] regionserver.ReplicationSink(324): Finished replicating [2769c2a5-6298-47aa-8583-d1e5652dbcb6] bulk loaded data 2024-11-21T00:29:08,281 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:08,288 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:29:08,354 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:29:08,354 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
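The ReplicationSink entries above (ports 35845 and 40747) show both slave clusters re-running the bulk load from the shipped HFiles. Reproducing that setup outside this test requires bulk-load replication to be enabled and a peer to be registered; the sketch below assumes the standard hbase.replication.bulkload.enabled / hbase.replication.cluster.id settings and a ZooKeeper cluster key, with the quorum string and peer id as placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddBulkLoadReplicationPeer {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Shown here for illustration only: these settings have to be present in the
    // site configuration of the source and sink region servers for bulk-loaded
    // HFiles to be shipped along with WAL edits.
    conf.setBoolean("hbase.replication.bulkload.enabled", true);
    conf.set("hbase.replication.cluster.id", "source-cluster");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ReplicationPeerConfig peer = ReplicationPeerConfig.newBuilder()
          .setClusterKey("peer-zk1,peer-zk2,peer-zk3:2181:/hbase") // slave cluster's quorum and znode
          .build();
      admin.addReplicationPeer("2", peer); // peer id "2" matches the naming style in the log, chosen arbitrarily here
    }
  }
}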
2024-11-21T00:29:08,354 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:345) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:08,354 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:08,358 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:08,358 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
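The shutdown stack above runs HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniHBaseCluster -> JVMClusterUtil. In outline, a test using the same utility brackets its body as in the sketch below; shutdownMiniCluster() is taken from the trace, while startMiniCluster() and everything else is an assumed, illustrative counterpart:

import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();        // brings up ZK, HDFS, a master, and a region server
    try {
      // ... create tables, bulk load, assert on replicated contents ...
    } finally {
      util.shutdownMiniCluster();   // the path seen in the call stack above
    }
  }
}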
2024-11-21T00:29:08,358 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:29:08,358 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=667286233, stopped=false 2024-11-21T00:29:08,358 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,37889,1732148934945 2024-11-21T00:29:08,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-1051660059/running 2024-11-21T00:29:08,419 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-1051660059/running 2024-11-21T00:29:08,419 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:29:08,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:29:08,419 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:29:08,419 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:29:08,419 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:345) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:08,419 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:08,420 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,35845,1732148935107' ***** 2024-11-21T00:29:08,420 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:29:08,420 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on znode that does not yet exist, /2-1051660059/running 2024-11-21T00:29:08,420 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Set watcher on znode that does not yet exist, /2-1051660059/running 2024-11-21T00:29:08,420 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:29:08,420 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:29:08,420 INFO [RS:0;5ed4808ef0e6:35845 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:29:08,421 INFO [RS:0;5ed4808ef0e6:35845 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:29:08,421 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(3091): Received CLOSE for 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:08,435 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,35845,1732148935107 2024-11-21T00:29:08,435 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:29:08,435 INFO [RS:0;5ed4808ef0e6:35845 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:35845. 
2024-11-21T00:29:08,435 DEBUG [RS:0;5ed4808ef0e6:35845 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:08,435 DEBUG [RS:0;5ed4808ef0e6:35845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:08,436 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:29:08,436 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:29:08,436 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-21T00:29:08,436 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:29:08,436 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 31faf65a546f672b94789a29bee40c63, disabling compactions & flushes 2024-11-21T00:29:08,436 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:08,436 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:08,436 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148940081.31faf65a546f672b94789a29bee40c63. after waiting 0 ms 2024-11-21T00:29:08,436 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148940081.31faf65a546f672b94789a29bee40c63. 
2024-11-21T00:29:08,445 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:08,487 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:29:08,488 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:29:08,489 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:29:08,489 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:29:08,489 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:29:08,489 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:29:08,489 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.27 KB heapSize=3.38 KB 2024-11-21T00:29:08,445 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:29:08,492 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1325): Online Regions={31faf65a546f672b94789a29bee40c63=test,,1732148940081.31faf65a546f672b94789a29bee40c63., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:29:08,492 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 31faf65a546f672b94789a29bee40c63 2024-11-21T00:29:08,504 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/default/test/31faf65a546f672b94789a29bee40c63/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-11-21T00:29:08,505 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:29:08,505 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:29:08,505 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148940081.31faf65a546f672b94789a29bee40c63. 
2024-11-21T00:29:08,505 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 31faf65a546f672b94789a29bee40c63: Waiting for close lock at 1732148948436Running coprocessor pre-close hooks at 1732148948436Disabling compacts and flushes for region at 1732148948436Disabling writes for close at 1732148948436Writing region close event to WAL at 1732148948493 (+57 ms)Running coprocessor post-close hooks at 1732148948504 (+11 ms)Closed at 1732148948505 (+1 ms) 2024-11-21T00:29:08,505 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148940081.31faf65a546f672b94789a29bee40c63. 2024-11-21T00:29:08,522 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/info/e7bafec335454cd2a72c2ce273314410 is 129, key is test,,1732148940081.31faf65a546f672b94789a29bee40c63./info:regioninfo/1732148940895/Put/seqid=0 2024-11-21T00:29:08,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741839_1015 (size=6421) 2024-11-21T00:29:08,569 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.03 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/info/e7bafec335454cd2a72c2ce273314410 2024-11-21T00:29:08,631 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/ns/25e2cb1bbd6d474e93000853db8e3d42 is 43, key is default/ns:d/1732148938226/Put/seqid=0 2024-11-21T00:29:08,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741840_1016 (size=5153) 2024-11-21T00:29:08,693 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:08,803 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:08,895 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:08,915 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:08,979 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] 
hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:29:08,979 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:29:09,088 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/ns/25e2cb1bbd6d474e93000853db8e3d42 2024-11-21T00:29:09,095 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:09,117 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/rep_barrier/2d4f5e76650f40fa8c339a8b28869785 is 112, key is test,,1732148940081.31faf65a546f672b94789a29bee40c63./rep_barrier:seqnumDuringOpen/1732148940895/Put/seqid=0 2024-11-21T00:29:09,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741841_1017 (size=5518) 2024-11-21T00:29:09,148 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/rep_barrier/2d4f5e76650f40fa8c339a8b28869785 2024-11-21T00:29:09,217 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/table/355eca9a0c5b423cac29bc0335f5ac46 is 40, key is test/table:state/1732148940916/Put/seqid=0 2024-11-21T00:29:09,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741842_1018 (size=5165) 2024-11-21T00:29:09,253 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=72 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/table/355eca9a0c5b423cac29bc0335f5ac46 2024-11-21T00:29:09,266 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/info/e7bafec335454cd2a72c2ce273314410 as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/info/e7bafec335454cd2a72c2ce273314410 2024-11-21T00:29:09,281 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/info/e7bafec335454cd2a72c2ce273314410, entries=10, sequenceid=11, filesize=6.3 K 2024-11-21T00:29:09,283 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/ns/25e2cb1bbd6d474e93000853db8e3d42 as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/ns/25e2cb1bbd6d474e93000853db8e3d42 2024-11-21T00:29:09,289 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/ns/25e2cb1bbd6d474e93000853db8e3d42, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:29:09,290 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/rep_barrier/2d4f5e76650f40fa8c339a8b28869785 as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/rep_barrier/2d4f5e76650f40fa8c339a8b28869785 2024-11-21T00:29:09,295 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/rep_barrier/2d4f5e76650f40fa8c339a8b28869785, entries=1, sequenceid=11, filesize=5.4 K 2024-11-21T00:29:09,296 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/.tmp/table/355eca9a0c5b423cac29bc0335f5ac46 as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/table/355eca9a0c5b423cac29bc0335f5ac46 2024-11-21T00:29:09,297 DEBUG [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:09,308 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/table/355eca9a0c5b423cac29bc0335f5ac46, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:29:09,310 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.27 KB/1305, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 1588230740 in 820ms, sequenceid=11, compaction requested=false 2024-11-21T00:29:09,376 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T00:29:09,377 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:29:09,377 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:29:09,377 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-21T00:29:09,378 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148948487Running coprocessor pre-close hooks at 1732148948487Disabling compacts and flushes for region at 1732148948487Disabling writes for close at 1732148948489 (+2 ms)Obtaining lock to block concurrent updates at 1732148948489Preparing flush snapshotting stores in 1588230740 at 1732148948489Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1305, getHeapSize=3392, getOffHeapSize=0, getCellsCount=15 at 1732148948490 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732148948490Flushing 1588230740/info: creating writer at 1732148948491 (+1 ms)Flushing 1588230740/info: appending metadata at 1732148948521 (+30 ms)Flushing 1588230740/info: closing flushed file at 1732148948521Flushing 1588230740/ns: creating writer at 1732148948583 (+62 ms)Flushing 1588230740/ns: appending metadata at 1732148948631 (+48 ms)Flushing 1588230740/ns: closing flushed file at 1732148948631Flushing 1588230740/rep_barrier: creating writer at 1732148949096 (+465 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148949116 (+20 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148949116Flushing 1588230740/table: creating writer at 1732148949165 (+49 ms)Flushing 1588230740/table: appending metadata at 1732148949217 (+52 ms)Flushing 1588230740/table: closing flushed file at 1732148949217Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2e6488f1: reopening flushed file at 1732148949265 (+48 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f6bbf9a: reopening flushed file at 1732148949282 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e4143b7: reopening flushed file at 1732148949289 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79f301bb: reopening flushed file at 1732148949295 (+6 ms)Finished flush of dataSize ~1.27 KB/1305, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 1588230740 in 820ms, sequenceid=11, compaction requested=false at 1732148949310 (+15 ms)Writing region close event to WAL at 1732148949334 (+24 ms)Running coprocessor post-close hooks at 1732148949377 (+43 ms)Closed at 1732148949377 2024-11-21T00:29:09,378 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:29:09,433 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:09,442 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset 
compression=false 2024-11-21T00:29:09,460 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04d0dd054c4f9e0316fac51b61606b4b, had cached 0 bytes from a total of 37570 2024-11-21T00:29:09,497 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,35845,1732148935107; all regions closed. 2024-11-21T00:29:09,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741834_1010 (size=2717) 2024-11-21T00:29:09,508 DEBUG [RS:0;5ed4808ef0e6:35845 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/oldWALs 2024-11-21T00:29:09,508 INFO [RS:0;5ed4808ef0e6:35845 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C35845%2C1732148935107.meta:.meta(num 1732148938107) 2024-11-21T00:29:09,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741832_1008 (size=1028) 2024-11-21T00:29:09,527 DEBUG [RS:0;5ed4808ef0e6:35845 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/oldWALs 2024-11-21T00:29:09,527 INFO [RS:0;5ed4808ef0e6:35845 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C35845%2C1732148935107:(num 1732148937107) 2024-11-21T00:29:09,527 DEBUG [RS:0;5ed4808ef0e6:35845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:09,527 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:29:09,527 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:29:09,527 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:29:09,528 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:29:09,528 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:29:09,529 INFO [RS:0;5ed4808ef0e6:35845 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:35845. 
2024-11-21T00:29:09,529 DEBUG [RS:0;5ed4808ef0e6:35845 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:09,529 DEBUG [RS:0;5ed4808ef0e6:35845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:09,530 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
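The DEBUG record above shows the call stack that the closing connection logs, headed by Thread.getStackTrace. As a reader aid only, here is a minimal, generic JDK sketch of capturing such a stack for logging; it is not the AsyncConnectionImpl implementation, and the class name is made up for illustration:

    import java.util.Arrays;
    import java.util.stream.Collectors;

    // Generic sketch: build a "Call stack:" string the way a close hook might,
    // using only the JDK. Not HBase code; names are illustrative.
    public final class CloseStackSketch {
        static String currentStack() {
            return Arrays.stream(Thread.currentThread().getStackTrace())
                .skip(2) // drop the getStackTrace() and currentStack() frames themselves
                .map(frame -> "at " + frame)
                .collect(Collectors.joining(" ")); // single-line form, as in the record above
        }

        public static void main(String[] args) {
            System.out.println("Call stack: " + currentStack());
        }
    }
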
2024-11-21T00:29:09,530 DEBUG [RS:0;5ed4808ef0e6:35845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:09,530 INFO [RS:0;5ed4808ef0e6:35845 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35845 2024-11-21T00:29:09,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059/rs 2024-11-21T00:29:09,553 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-1051660059/rs/5ed4808ef0e6,35845,1732148935107 2024-11-21T00:29:09,553 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:29:09,554 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,35845,1732148935107] 2024-11-21T00:29:09,577 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /2-1051660059/draining/5ed4808ef0e6,35845,1732148935107 already deleted, retry=false 2024-11-21T00:29:09,577 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,35845,1732148935107 expired; onlineServers=0 2024-11-21T00:29:09,577 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,37889,1732148934945' ***** 2024-11-21T00:29:09,577 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:29:09,577 INFO [M:0;5ed4808ef0e6:37889 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:29:09,577 INFO [M:0;5ed4808ef0e6:37889 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:29:09,577 DEBUG [M:0;5ed4808ef0e6:37889 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:29:09,578 DEBUG [M:0;5ed4808ef0e6:37889 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:29:09,578 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
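The ZooKeeper NodeChildrenChanged and NodeDeleted events above are how the master's RegionServerTracker learns that the region server's ephemeral znode under /rs is gone. A standalone sketch of watching that children list with the plain ZooKeeper client follows; the quorum string and base znode are copied from the log above, and the class name is hypothetical:

    import java.util.List;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Hypothetical watcher sketch; connection values mirror the quorum and baseZNode seen above.
    public final class RsChildrenWatcherSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:62972", 30_000, event -> { });
            Watcher watcher = new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    // Fires once per registration, e.g. NodeChildrenChanged on /rs when an
                    // ephemeral region-server znode is created or deleted.
                    System.out.println(event.getType() + " on " + event.getPath());
                }
            };
            List<String> servers = zk.getChildren("/2-1051660059/rs", watcher);
            System.out.println("Live region servers: " + servers);
            Thread.sleep(60_000); // keep the session alive long enough to observe an event
            zk.close();
        }
    }
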
2024-11-21T00:29:09,578 INFO [M:0;5ed4808ef0e6:37889 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:29:09,578 INFO [M:0;5ed4808ef0e6:37889 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:29:09,578 DEBUG [M:0;5ed4808ef0e6:37889 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:29:09,578 INFO [M:0;5ed4808ef0e6:37889 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:29:09,578 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148936822 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148936822,5,FailOnTimeoutGroup] 2024-11-21T00:29:09,578 INFO [M:0;5ed4808ef0e6:37889 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:29:09,578 INFO [M:0;5ed4808ef0e6:37889 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:29:09,578 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148936822 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148936822,5,FailOnTimeoutGroup] 2024-11-21T00:29:09,580 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:29:09,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/2-1051660059/master 2024-11-21T00:29:09,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/2-1051660059 2024-11-21T00:29:09,596 INFO [M:0;5ed4808ef0e6:37889 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/.lastflushedseqids 2024-11-21T00:29:09,607 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /2-1051660059/master already deleted, retry=false 2024-11-21T00:29:09,607 DEBUG [RegionServerTracker-0 {}] master.ActiveMasterManager(353): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Failed delete of our master address node; KeeperErrorCode = NoNode for /2-1051660059/master 2024-11-21T00:29:09,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741843_1019 (size=173) 2024-11-21T00:29:09,672 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:09,672 DEBUG [pool-2605-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35845-0x1015acb039c0007, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:09,672 INFO [RS:0;5ed4808ef0e6:35845 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:29:09,672 INFO [RS:0;5ed4808ef0e6:35845 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,35845,1732148935107; zookeeper connection closed. 
2024-11-21T00:29:09,680 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f747d50 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f747d50 2024-11-21T00:29:09,680 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:29:10,056 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:10,072 INFO [M:0;5ed4808ef0e6:37889 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:29:10,073 INFO [M:0;5ed4808ef0e6:37889 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:29:10,076 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:29:10,076 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:10,076 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:10,076 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:29:10,076 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:29:10,076 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=28.00 KB heapSize=33.88 KB 2024-11-21T00:29:10,135 DEBUG [M:0;5ed4808ef0e6:37889 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6dafe786fd0a4db9a02a6920e8efba8d is 82, key is hbase:meta,,1/info:regioninfo/1732148938205/Put/seqid=0 2024-11-21T00:29:10,156 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:10,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741844_1020 (size=5672) 2024-11-21T00:29:10,624 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6dafe786fd0a4db9a02a6920e8efba8d 2024-11-21T00:29:10,650 DEBUG [M:0;5ed4808ef0e6:37889 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a91b361311bd4348a4a3455a65e645ed is 1247, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732148940920/Put/seqid=0 2024-11-21T00:29:10,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741845_1021 (size=6587) 2024-11-21T00:29:10,779 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:10,992 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:11,104 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.45 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a91b361311bd4348a4a3455a65e645ed 2024-11-21T00:29:11,146 DEBUG [M:0;5ed4808ef0e6:37889 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c4f90d08286f460d8cb7fb7111c9f2d1 is 69, key is 5ed4808ef0e6,35845,1732148935107/rs:state/1732148936868/Put/seqid=0 2024-11-21T00:29:11,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741846_1022 (size=5156) 2024-11-21T00:29:11,163 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c4f90d08286f460d8cb7fb7111c9f2d1 2024-11-21T00:29:11,181 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6dafe786fd0a4db9a02a6920e8efba8d as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6dafe786fd0a4db9a02a6920e8efba8d 2024-11-21T00:29:11,190 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6dafe786fd0a4db9a02a6920e8efba8d, entries=8, sequenceid=55, filesize=5.5 K 2024-11-21T00:29:11,191 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a91b361311bd4348a4a3455a65e645ed as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a91b361311bd4348a4a3455a65e645ed 2024-11-21T00:29:11,198 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a91b361311bd4348a4a3455a65e645ed, entries=6, sequenceid=55, filesize=6.4 K 2024-11-21T00:29:11,199 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c4f90d08286f460d8cb7fb7111c9f2d1 as hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c4f90d08286f460d8cb7fb7111c9f2d1 2024-11-21T00:29:11,205 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43965/user/jenkins/test-data/cbb32389-d554-e1b7-0048-993cbeb83f88/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c4f90d08286f460d8cb7fb7111c9f2d1, entries=1, sequenceid=55, filesize=5.0 K 2024-11-21T00:29:11,207 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(3140): Finished flush of dataSize ~28.00 KB/28675, heapSize ~33.59 KB/34392, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1130ms, sequenceid=55, compaction requested=false 
2024-11-21T00:29:11,241 INFO [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:11,241 DEBUG [M:0;5ed4808ef0e6:37889 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148950073Disabling compacts and flushes for region at 1732148950073Disabling writes for close at 1732148950076 (+3 ms)Obtaining lock to block concurrent updates at 1732148950076Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148950076Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=28675, getHeapSize=34632, getOffHeapSize=0, getCellsCount=66 at 1732148950077 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148950082 (+5 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148950082Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148950134 (+52 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148950134Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148950630 (+496 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148950649 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148950650 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148951109 (+459 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148951145 (+36 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148951146 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cbbd022: reopening flushed file at 1732148951178 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35a2dfd3: reopening flushed file at 1732148951190 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3569e4c8: reopening flushed file at 1732148951198 (+8 ms)Finished flush of dataSize ~28.00 KB/28675, heapSize ~33.59 KB/34392, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1130ms, sequenceid=55, compaction requested=false at 1732148951207 (+9 ms)Writing region close event to WAL at 1732148951241 (+34 ms)Closed at 1732148951241 2024-11-21T00:29:11,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46487 is added to blk_1073741830_1006 (size=32782) 2024-11-21T00:29:11,268 INFO [M:0;5ed4808ef0e6:37889 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:29:11,268 INFO [M:0;5ed4808ef0e6:37889 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37889 2024-11-21T00:29:11,268 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
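The "Region close journal" record above packs the whole close timeline into one string: each step name, its epoch-millisecond timestamp, and the delta since the previous step. A small offline parser, using only the JDK and an abbreviated copy of that string, might look like this (class name and sample text are illustrative):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Reader aid only: split a "Region close journal" string into step / timestamp / delta.
    public final class CloseJournalParserSketch {
        private static final Pattern STEP =
            Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

        public static void main(String[] args) {
            String journal = "Waiting for close lock at 1732148950073"
                + "Disabling writes for close at 1732148950076 (+3 ms)"
                + "Writing region close event to WAL at 1732148951241 (+34 ms)"
                + "Closed at 1732148951241";
            Matcher m = STEP.matcher(journal);
            while (m.find()) {
                String delta = m.group(3) == null ? "0" : m.group(3);
                System.out.printf("%-45s %s  +%s ms%n", m.group(1), m.group(2), delta);
            }
        }
    }
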
2024-11-21T00:29:11,269 INFO [M:0;5ed4808ef0e6:37889 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:29:11,427 INFO [M:0;5ed4808ef0e6:37889 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:29:11,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:11,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37889-0x1015acb039c0006, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:11,454 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52fd1809{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:29:11,454 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3026cbe4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:29:11,454 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:29:11,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bb67c5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:29:11,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44f669d7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/hadoop.log.dir/,STOPPED} 2024-11-21T00:29:11,458 WARN [BP-2040318916-172.17.0.2-1732148932267 heartbeating to localhost/127.0.0.1:43965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:29:11,458 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:29:11,458 WARN [BP-2040318916-172.17.0.2-1732148932267 heartbeating to localhost/127.0.0.1:43965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2040318916-172.17.0.2-1732148932267 (Datanode Uuid 061167e0-d276-436f-8806-542de1d2fa7b) service to localhost/127.0.0.1:43965 2024-11-21T00:29:11,458 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:29:11,458 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/cluster_492b2821-a719-7498-3548-b01a9298b34c/data/data1/current/BP-2040318916-172.17.0.2-1732148932267 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:29:11,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/cluster_492b2821-a719-7498-3548-b01a9298b34c/data/data2/current/BP-2040318916-172.17.0.2-1732148932267 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:29:11,459 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:29:11,470 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bdd2661{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:29:11,480 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3613c72b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:29:11,480 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:29:11,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@558fb9db{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:29:11,481 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50aaefd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/2e5bc16d-85e3-2cee-7133-7f3880d8011e/hadoop.log.dir/,STOPPED} 2024-11-21T00:29:11,512 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:29:11,512 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:29:11,512 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:29:11,512 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:345) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:11,512 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:11,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:11,513 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
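The call stack above spells out the shutdown path: the JUnit test calls TestMasterReplication.shutDownMiniClusters, which calls HBaseTestingUtil.shutdownMiniCluster, which tears down the HBase cluster and then the mini DFS cluster recorded earlier. As a rough sketch of that lifecycle, assuming JUnit 4 (as in the trace) and a locally constructed HBaseTestingUtil, with all other names made up for illustration:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    // Sketch of the setup/teardown shape implied by the stack trace above; not the real test.
    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil util = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            util.startMiniCluster(); // ZooKeeper + mini DFS + master + region server
        }

        @Test
        public void clusterComesUp() throws Exception {
            // real tests (e.g. the replication tests in this log) exercise the cluster here
        }

        @After
        public void tearDown() throws Exception {
            // Produces the kind of shutdown cascade recorded in this log: close the client
            // connection, stop region servers and the master, then stop the mini DFS cluster.
            util.shutdownMiniCluster();
        }
    }
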
2024-11-21T00:29:11,513 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:29:11,513 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2131125769, stopped=false 2024-11-21T00:29:11,514 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,37585,1732148929254 2024-11-21T00:29:11,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1702889742/running 2024-11-21T00:29:11,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:29:11,532 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:29:11,532 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on znode that does not yet exist, /1702889742/running 2024-11-21T00:29:11,532 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-21T00:29:11,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1702889742/running 2024-11-21T00:29:11,532 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:345) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:11,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:29:11,532 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:11,533 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,40747,1732148929411' ***** 2024-11-21T00:29:11,533 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:29:11,533 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:29:11,533 INFO [RS:0;5ed4808ef0e6:40747 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:29:11,533 INFO [RS:0;5ed4808ef0e6:40747 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:29:11,533 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(3091): Received CLOSE for e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:29:11,534 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:29:11,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Set watcher on znode that does not yet exist, /1702889742/running 2024-11-21T00:29:11,540 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,40747,1732148929411 2024-11-21T00:29:11,540 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:29:11,540 INFO [RS:0;5ed4808ef0e6:40747 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:40747. 
2024-11-21T00:29:11,541 DEBUG [RS:0;5ed4808ef0e6:40747 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:11,541 DEBUG [RS:0;5ed4808ef0e6:40747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:11,541 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:29:11,541 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:29:11,541 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:29:11,541 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:29:11,544 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-21T00:29:11,544 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1325): Online Regions={e4156432cf81317006fd52a2ffca50a1=test,,1732148938920.e4156432cf81317006fd52a2ffca50a1., 1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:29:11,544 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, e4156432cf81317006fd52a2ffca50a1 2024-11-21T00:29:11,548 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:29:11,548 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:29:11,548 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:29:11,549 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:29:11,549 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:29:11,549 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.27 KB heapSize=3.38 KB 2024-11-21T00:29:11,552 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing e4156432cf81317006fd52a2ffca50a1, disabling compactions & flushes 2024-11-21T00:29:11,552 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:29:11,552 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:29:11,552 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. after waiting 0 ms 2024-11-21T00:29:11,552 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 
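Here the meta region (1588230740) is flushed as part of its close; the store-by-store flushes and commits that follow are the result. That flush happens automatically, but the same kind of flush can be requested from a client, as in this hedged sketch (it assumes an hbase-site.xml on the classpath pointing at the cluster; the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch only: explicitly flush the tables whose automatic close-time flushes appear above.
    public final class ExplicitFlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("test"));   // the user table seen in this log
                admin.flush(TableName.META_TABLE_NAME);   // hbase:meta, i.e. region 1588230740
            }
        }
    }
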
2024-11-21T00:29:11,591 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/default/test/e4156432cf81317006fd52a2ffca50a1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T00:29:11,592 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:29:11,592 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:29:11,592 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:29:11,592 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for e4156432cf81317006fd52a2ffca50a1: Waiting for close lock at 1732148951552Running coprocessor pre-close hooks at 1732148951552Disabling compacts and flushes for region at 1732148951552Disabling writes for close at 1732148951552Writing region close event to WAL at 1732148951587 (+35 ms)Running coprocessor post-close hooks at 1732148951592 (+5 ms)Closed at 1732148951592 2024-11-21T00:29:11,592 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148938920.e4156432cf81317006fd52a2ffca50a1. 2024-11-21T00:29:11,593 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/info/bdebb88f366a49d88a23748c2e4c0511 is 129, key is test,,1732148938920.e4156432cf81317006fd52a2ffca50a1./info:regioninfo/1732148939927/Put/seqid=0 2024-11-21T00:29:11,609 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:11,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741841_1017 (size=6421) 2024-11-21T00:29:11,627 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:29:11,634 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.03 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/info/bdebb88f366a49d88a23748c2e4c0511 2024-11-21T00:29:11,692 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/ns/129536db99454a9ea9e9bedcbeb1d1e4 is 43, key is default/ns:d/1732148932108/Put/seqid=0 2024-11-21T00:29:11,747 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:11,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741842_1018 (size=5153) 2024-11-21T00:29:11,942 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:11,947 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:11,980 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:29:11,980 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:29:12,147 DEBUG [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:12,152 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/ns/129536db99454a9ea9e9bedcbeb1d1e4 2024-11-21T00:29:12,173 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/rep_barrier/7afee3acb70c4f5d96c0f22e96898b7f is 112, key is test,,1732148938920.e4156432cf81317006fd52a2ffca50a1./rep_barrier:seqnumDuringOpen/1732148939927/Put/seqid=0 2024-11-21T00:29:12,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741843_1019 (size=5518) 2024-11-21T00:29:12,182 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/rep_barrier/7afee3acb70c4f5d96c0f22e96898b7f 2024-11-21T00:29:12,214 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/table/5f2fb4e273564bb9a7e04a8caa5ddf12 is 40, key is test/table:state/1732148939950/Put/seqid=0 2024-11-21T00:29:12,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741844_1020 (size=5165) 2024-11-21T00:29:12,222 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore 
data size=72 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/table/5f2fb4e273564bb9a7e04a8caa5ddf12 2024-11-21T00:29:12,229 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/info/bdebb88f366a49d88a23748c2e4c0511 as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/info/bdebb88f366a49d88a23748c2e4c0511 2024-11-21T00:29:12,233 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/info/bdebb88f366a49d88a23748c2e4c0511, entries=10, sequenceid=11, filesize=6.3 K 2024-11-21T00:29:12,235 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/ns/129536db99454a9ea9e9bedcbeb1d1e4 as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/ns/129536db99454a9ea9e9bedcbeb1d1e4 2024-11-21T00:29:12,243 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/ns/129536db99454a9ea9e9bedcbeb1d1e4, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:29:12,244 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/rep_barrier/7afee3acb70c4f5d96c0f22e96898b7f as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/rep_barrier/7afee3acb70c4f5d96c0f22e96898b7f 2024-11-21T00:29:12,249 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/rep_barrier/7afee3acb70c4f5d96c0f22e96898b7f, entries=1, sequenceid=11, filesize=5.4 K 2024-11-21T00:29:12,250 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/.tmp/table/5f2fb4e273564bb9a7e04a8caa5ddf12 as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/table/5f2fb4e273564bb9a7e04a8caa5ddf12 2024-11-21T00:29:12,254 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/table/5f2fb4e273564bb9a7e04a8caa5ddf12, entries=2, sequenceid=11, filesize=5.0 K 2024-11-21T00:29:12,255 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.27 KB/1305, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 1588230740 in 706ms, sequenceid=11, compaction requested=false 2024-11-21T00:29:12,269 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-21T00:29:12,269 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:29:12,269 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:29:12,269 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:29:12,269 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148951548Running coprocessor pre-close hooks at 1732148951548Disabling compacts and flushes for region at 1732148951548Disabling writes for close at 1732148951549 (+1 ms)Obtaining lock to block concurrent updates at 1732148951549Preparing flush snapshotting stores in 1588230740 at 1732148951549Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1305, getHeapSize=3392, getOffHeapSize=0, getCellsCount=15 at 1732148951549Flushing stores of hbase:meta,,1.1588230740 at 1732148951556 (+7 ms)Flushing 1588230740/info: creating writer at 1732148951556Flushing 1588230740/info: appending metadata at 1732148951592 (+36 ms)Flushing 1588230740/info: closing flushed file at 1732148951592Flushing 1588230740/ns: creating writer at 1732148951656 (+64 ms)Flushing 1588230740/ns: appending metadata at 1732148951691 (+35 ms)Flushing 1588230740/ns: closing flushed file at 1732148951691Flushing 1588230740/rep_barrier: creating writer at 1732148952156 (+465 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148952172 (+16 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148952172Flushing 1588230740/table: creating writer at 1732148952187 (+15 ms)Flushing 1588230740/table: appending metadata at 1732148952214 (+27 ms)Flushing 1588230740/table: closing flushed file at 1732148952214Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6db8c072: reopening flushed file at 1732148952228 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@93a9700: reopening flushed file at 1732148952234 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55347e36: reopening flushed file at 1732148952243 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c09c65b: reopening flushed file at 1732148952249 (+6 ms)Finished flush of dataSize ~1.27 KB/1305, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 1588230740 in 706ms, sequenceid=11, compaction requested=false at 1732148952255 (+6 ms)Writing region close event to WAL at 1732148952264 (+9 ms)Running coprocessor post-close hooks at 1732148952269 (+5 ms)Closed at 1732148952269 2024-11-21T00:29:12,270 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:29:12,347 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,40747,1732148929411; all regions closed. 2024-11-21T00:29:12,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741834_1010 (size=2717) 2024-11-21T00:29:12,351 DEBUG [RS:0;5ed4808ef0e6:40747 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/oldWALs 2024-11-21T00:29:12,351 INFO [RS:0;5ed4808ef0e6:40747 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C40747%2C1732148929411.meta:.meta(num 1732148931909) 2024-11-21T00:29:12,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741833_1009 (size=1374) 2024-11-21T00:29:12,356 DEBUG [RS:0;5ed4808ef0e6:40747 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/oldWALs 2024-11-21T00:29:12,356 INFO [RS:0;5ed4808ef0e6:40747 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 5ed4808ef0e6%2C40747%2C1732148929411:(num 1732148931108) 2024-11-21T00:29:12,356 DEBUG [RS:0;5ed4808ef0e6:40747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:12,356 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:29:12,356 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:29:12,356 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:29:12,356 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:29:12,356 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:29:12,357 INFO [RS:0;5ed4808ef0e6:40747 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:40747. 
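Editor's note: the flush sequence above writes each store's snapshot to a file under `.tmp/<family>/` and then "commits" it by moving the file into the family directory before the region is closed. As a rough, hedged illustration of that write-to-temp-then-rename idea (this is not HBase's actual HRegionFileSystem code; the paths and file names below are made up), a minimal Hadoop FileSystem sketch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRename {
  public static void main(String[] args) throws Exception {
    // Assumption: fs.defaultFS in the Configuration points at the filesystem you want to test against.
    FileSystem fs = FileSystem.get(new Configuration());

    Path tmp = new Path("/tmp/region-demo/.tmp/info/flush-0001");   // hypothetical paths
    Path dst = new Path("/tmp/region-demo/info/flush-0001");

    // 1. Write the flushed data to a temporary location first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here\n");
    }

    // 2. "Commit" by renaming into the store directory; in HDFS a rename is a metadata-only
    //    operation, so readers never observe a half-written store file.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("commit failed: " + tmp + " -> " + dst);
    }
    System.out.println("committed " + dst);
  }
}
```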
2024-11-21T00:29:12,357 DEBUG [RS:0;5ed4808ef0e6:40747 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSink.stopReplicationSinkServices(ReplicationSink.java:463) at org.apache.hadoop.hbase.replication.ReplicationSinkServiceImpl.stopReplicationService(ReplicationSinkServiceImpl.java:89) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2535) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:12,357 DEBUG [RS:0;5ed4808ef0e6:40747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:12,357 DEBUG [RS:0;5ed4808ef0e6:40747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:12,357 INFO [RS:0;5ed4808ef0e6:40747 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40747 2024-11-21T00:29:12,357 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
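Editor's note: the DEBUG "Call stack:" entry above is not an exception; AsyncConnectionImpl simply captures the current stack with Thread.getStackTrace() so the log records which code path closed the connection. A tiny self-contained sketch of the same trick (class name and message are placeholders):

```java
public class WhoClosedMe {
  // Log the caller's stack so a later reader can see which code path triggered close().
  static void close() {
    StringBuilder sb = new StringBuilder("Connection closed. Call stack:");
    for (StackTraceElement frame : Thread.currentThread().getStackTrace()) {
      sb.append("\n    at ").append(frame);
    }
    System.out.println(sb);   // a real server would send this to its DEBUG logger instead
  }

  public static void main(String[] args) {
    close();
  }
}
```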
2024-11-21T00:29:12,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742/rs 2024-11-21T00:29:12,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1702889742/rs/5ed4808ef0e6,40747,1732148929411 2024-11-21T00:29:12,408 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:29:12,409 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,40747,1732148929411] 2024-11-21T00:29:12,419 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /1702889742/draining/5ed4808ef0e6,40747,1732148929411 already deleted, retry=false 2024-11-21T00:29:12,419 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,40747,1732148929411 expired; onlineServers=0 2024-11-21T00:29:12,419 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,37585,1732148929254' ***** 2024-11-21T00:29:12,419 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:29:12,419 INFO [M:0;5ed4808ef0e6:37585 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:29:12,419 INFO [M:0;5ed4808ef0e6:37585 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:29:12,419 DEBUG [M:0;5ed4808ef0e6:37585 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:29:12,419 DEBUG [M:0;5ed4808ef0e6:37585 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:29:12,419 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-21T00:29:12,419 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148930900 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148930900,5,FailOnTimeoutGroup] 2024-11-21T00:29:12,419 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148930900 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148930900,5,FailOnTimeoutGroup] 2024-11-21T00:29:12,419 INFO [M:0;5ed4808ef0e6:37585 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:29:12,420 INFO [M:0;5ed4808ef0e6:37585 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:29:12,420 DEBUG [M:0;5ed4808ef0e6:37585 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:29:12,420 INFO [M:0;5ed4808ef0e6:37585 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:29:12,420 INFO [M:0;5ed4808ef0e6:37585 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:29:12,420 INFO [M:0;5ed4808ef0e6:37585 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:29:12,420 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
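Editor's note: the ZKWatcher lines above show the liveness mechanism at work: each region server owns an ephemeral znode under `.../rs/`, and when its ZooKeeper session ends the node disappears, so watchers receive NodeDeleted/NodeChildrenChanged and the master's RegionServerTracker processes the server as expired. A hedged, standalone sketch of that mechanism with the stock ZooKeeper client (assumes a local ZooKeeper at 127.0.0.1:2181; the znode path is illustrative, not HBase's real layout):

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRsDemo {
  public static void main(String[] args) throws Exception {
    CountDownLatch deleted = new CountDownLatch(1);

    // "Master side": one session watches the node and reacts when it disappears.
    ZooKeeper watcherZk = new ZooKeeper("127.0.0.1:2181", 15000, event -> { });
    // "Region server side": a second session owns the ephemeral node.
    ZooKeeper rsZk = new ZooKeeper("127.0.0.1:2181", 15000, event -> { });

    String path = "/demo-rs-node";
    rsZk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    watcherZk.exists(path, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("ephemeral node gone -> treat server as expired: " + event.getPath());
        deleted.countDown();
      }
    });

    rsZk.close();        // session ends, ZooKeeper removes the ephemeral node
    deleted.await();     // watcher fires with NodeDeleted, as in the log entries above
    watcherZk.close();
  }
}
```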
2024-11-21T00:29:12,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/1702889742/master 2024-11-21T00:29:12,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/1702889742 2024-11-21T00:29:12,440 DEBUG [M:0;5ed4808ef0e6:37585 {}] zookeeper.RecoverableZooKeeper(212): Node /1702889742/master already deleted, retry=false 2024-11-21T00:29:12,440 DEBUG [M:0;5ed4808ef0e6:37585 {}] master.ActiveMasterManager(353): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Failed delete of our master address node; KeeperErrorCode = NoNode for /1702889742/master 2024-11-21T00:29:12,441 INFO [M:0;5ed4808ef0e6:37585 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/.lastflushedseqids 2024-11-21T00:29:12,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741845_1021 (size=173) 2024-11-21T00:29:12,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:12,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40747-0x1015acb039c0004, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:12,519 INFO [RS:0;5ed4808ef0e6:40747 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:29:12,519 INFO [RS:0;5ed4808ef0e6:40747 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,40747,1732148929411; zookeeper connection closed. 
2024-11-21T00:29:12,520 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@675709b8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@675709b8 2024-11-21T00:29:12,520 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:29:12,554 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:12,564 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,33745,1732148444978.replicationSource.wal-reader.5ed4808ef0e6%2C33745%2C1732148444978,1-5ed4808ef0e6,33745,1732148444978 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:34141/user/jenkins/test-data/d8663753-0603-6b4f-a86d-c8b873d60e0a/WALs/5ed4808ef0e6,33745,1732148444978/5ed4808ef0e6%2C33745%2C1732148444978.1732148447287 to pos 1033, reset compression=false 2024-11-21T00:29:12,846 INFO [M:0;5ed4808ef0e6:37585 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:29:12,846 INFO [M:0;5ed4808ef0e6:37585 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:29:12,846 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:29:12,846 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:12,846 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:12,846 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:29:12,846 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
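Editor's note: the recurring "Reset reader ... to pos 393, reset compression=false" lines come from the replication WAL tailers, which remember the byte offset they last shipped and reopen the WAL at that position instead of re-reading from the start. This is not HBase's WALEntryStream itself, but the underlying idea is a plain seek on an HDFS stream; in the sketch below the file name and offset are placeholders (and the file is assumed to exist):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ResumeFromOffset {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path wal = new Path("/tmp/demo-wal");   // hypothetical file standing in for a WAL
    long lastShippedPos = 393L;             // offset previously recorded by the shipper

    try (FSDataInputStream in = fs.open(wal)) {
      in.seek(lastShippedPos);              // skip everything already replicated
      byte[] buf = new byte[4096];
      int n = in.read(buf);
      System.out.println("read " + Math.max(n, 0) + " new bytes starting at pos " + lastShippedPos);
    }
  }
}
```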
2024-11-21T00:29:12,846 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=28.00 KB heapSize=33.88 KB 2024-11-21T00:29:12,871 DEBUG [M:0;5ed4808ef0e6:37585 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1dd5dfb992d548ba945c6b25c2cb3765 is 82, key is hbase:meta,,1/info:regioninfo/1732148932029/Put/seqid=0 2024-11-21T00:29:12,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741846_1022 (size=5672) 2024-11-21T00:29:12,876 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1dd5dfb992d548ba945c6b25c2cb3765 2024-11-21T00:29:12,918 DEBUG [M:0;5ed4808ef0e6:37585 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb1fb626114d48a99df4254598063ea7 is 1247, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732148939965/Put/seqid=0 2024-11-21T00:29:12,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741847_1023 (size=6587) 2024-11-21T00:29:12,969 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:13,351 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.45 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb1fb626114d48a99df4254598063ea7 2024-11-21T00:29:13,386 DEBUG [M:0;5ed4808ef0e6:37585 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/329a73e186f64bf78378a42d891eb431 is 69, key is 5ed4808ef0e6,40747,1732148929411/rs:state/1732148930941/Put/seqid=0 2024-11-21T00:29:13,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741848_1024 (size=5156) 2024-11-21T00:29:13,406 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/329a73e186f64bf78378a42d891eb431 2024-11-21T00:29:13,436 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1dd5dfb992d548ba945c6b25c2cb3765 as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1dd5dfb992d548ba945c6b25c2cb3765 2024-11-21T00:29:13,445 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1dd5dfb992d548ba945c6b25c2cb3765, entries=8, sequenceid=55, filesize=5.5 K 2024-11-21T00:29:13,446 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/eb1fb626114d48a99df4254598063ea7 as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb1fb626114d48a99df4254598063ea7 2024-11-21T00:29:13,449 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/eb1fb626114d48a99df4254598063ea7, entries=6, sequenceid=55, filesize=6.4 K 2024-11-21T00:29:13,450 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/329a73e186f64bf78378a42d891eb431 as hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/329a73e186f64bf78378a42d891eb431 2024-11-21T00:29:13,454 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46101/user/jenkins/test-data/c8784218-1437-f38f-2f51-e9cfd83a4314/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/329a73e186f64bf78378a42d891eb431, entries=1, sequenceid=55, filesize=5.0 K 2024-11-21T00:29:13,456 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(3140): Finished flush of dataSize ~28.00 KB/28675, heapSize ~33.59 KB/34392, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 610ms, sequenceid=55, compaction requested=false 2024-11-21T00:29:13,536 INFO [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:13,536 DEBUG [M:0;5ed4808ef0e6:37585 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148952846Disabling compacts and flushes for region at 1732148952846Disabling writes for close at 1732148952846Obtaining lock to block concurrent updates at 1732148952846Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148952846Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=28675, getHeapSize=34632, getOffHeapSize=0, getCellsCount=66 at 1732148952847 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732148952847Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148952847Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148952871 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148952871Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148952881 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148952917 (+36 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148952917Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148953357 (+440 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148953385 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148953385Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@90a88a: reopening flushed file at 1732148953431 (+46 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6fcae078: reopening flushed file at 1732148953445 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42f3f748: reopening flushed file at 1732148953449 (+4 ms)Finished flush of dataSize ~28.00 KB/28675, heapSize ~33.59 KB/34392, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 610ms, sequenceid=55, compaction requested=false at 1732148953456 (+7 ms)Writing region close event to WAL at 1732148953536 (+80 ms)Closed at 1732148953536 2024-11-21T00:29:13,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38623 is added to blk_1073741830_1006 (size=32782) 2024-11-21T00:29:13,543 INFO [M:0;5ed4808ef0e6:37585 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-21T00:29:13,543 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
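Editor's note: both "Region close journal" blocks above (for 1588230740 and 1595e783b53d99cd5eef43b6debb2682) pack the whole close/flush timeline into one line; each step carries an absolute millisecond timestamp, and "(+N ms)" is the delta from the previous step, so the slow steps (465 ms before the rep_barrier flush, 440 ms before the rs flush) can be read straight off. A small hedged parser for that exact format, handy when eyeballing such journals; the excerpt in main() is shortened from the meta journal above:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class CloseJournalDeltas {
  // Matches "<step text> at <epoch millis>" optionally followed by "(+<delta> ms)".
  private static final Pattern STEP =
      Pattern.compile("(.+?) at (\\d{13})(?:\\s*\\(\\+(\\d+) ms\\))?");

  public static void main(String[] args) {
    String journal =
        "Flushing 1588230740/ns: closing flushed file at 1732148951691"
      + "Flushing 1588230740/rep_barrier: creating writer at 1732148952156 (+465 ms)"
      + "Flushing 1588230740/rep_barrier: appending metadata at 1732148952172 (+16 ms)";

    Matcher m = STEP.matcher(journal);
    while (m.find()) {
      String delta = m.group(3) == null ? "0" : m.group(3);
      System.out.printf("%-60s +%s ms%n", m.group(1).trim(), delta);
    }
  }
}
```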
2024-11-21T00:29:13,543 INFO [M:0;5ed4808ef0e6:37585 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37585 2024-11-21T00:29:13,544 INFO [M:0;5ed4808ef0e6:37585 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:29:13,561 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:13,698 INFO [M:0;5ed4808ef0e6:37585 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:29:13,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:13,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37585-0x1015acb039c0003, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:13,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@483ea1e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:29:13,721 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6099d95f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:29:13,722 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:29:13,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@156f5b83{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:29:13,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@679c947f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.log.dir/,STOPPED} 2024-11-21T00:29:13,728 WARN [BP-1901245850-172.17.0.2-1732148927192 heartbeating to localhost/127.0.0.1:46101 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:29:13,728 WARN [BP-1901245850-172.17.0.2-1732148927192 heartbeating to localhost/127.0.0.1:46101 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1901245850-172.17.0.2-1732148927192 (Datanode Uuid ec6de4a0-9f6c-4551-96a6-49bf945103ff) service to localhost/127.0.0.1:46101 2024-11-21T00:29:13,729 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:29:13,729 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/cluster_db71360f-4ddc-991f-5d5b-6c51c7db50b9/data/data1/current/BP-1901245850-172.17.0.2-1732148927192 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:29:13,729 ERROR [Command 
processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-21T00:29:13,729 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:29:13,729 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/cluster_db71360f-4ddc-991f-5d5b-6c51c7db50b9/data/data2/current/BP-1901245850-172.17.0.2-1732148927192 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:29:13,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ec68835{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:29:13,739 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ff310ca{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:29:13,739 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:29:13,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@455d2d73{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:29:13,739 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7d073492{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/d301888e-7f2d-898c-bc2c-74942e0f7eb1/hadoop.log.dir/,STOPPED} 2024-11-21T00:29:13,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:29:13,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-21T00:29:13,757 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:29:13,757 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:345) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:13,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:13,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:13,758 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-21T00:29:13,758 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:29:13,758 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=190174201, stopped=false 2024-11-21T00:29:13,758 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=5ed4808ef0e6,40563,1732148924943 2024-11-21T00:29:13,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01646092936/running 2024-11-21T00:29:13,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:29:13,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01646092936/running 2024-11-21T00:29:13,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:29:13,777 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:29:13,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on znode that does not yet exist, /01646092936/running 2024-11-21T00:29:13,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Set watcher on znode that does not yet exist, /01646092936/running 2024-11-21T00:29:13,778 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-21T00:29:13,778 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.replication.TestMasterReplication.shutDownMiniClusters(TestMasterReplication.java:602) at org.apache.hadoop.hbase.replication.TestMasterReplication.testHFileMultiSlaveReplication(TestMasterReplication.java:345) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:13,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:13,779 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '5ed4808ef0e6,38737,1732148925192' ***** 2024-11-21T00:29:13,779 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-21T00:29:13,779 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-21T00:29:13,779 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-21T00:29:13,779 INFO [RS:0;5ed4808ef0e6:38737 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-21T00:29:13,779 INFO [RS:0;5ed4808ef0e6:38737 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-21T00:29:13,779 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(3091): Received CLOSE for 0c91503478896a32e34433cb639122e5 2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(3091): Received CLOSE for 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(959): stopping server 5ed4808ef0e6,38737,1732148925192 2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:38737. 2024-11-21T00:29:13,780 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0c91503478896a32e34433cb639122e5, disabling compactions & flushes 2024-11-21T00:29:13,780 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region test,,1732148938288.0c91503478896a32e34433cb639122e5. 
2024-11-21T00:29:13,780 DEBUG [RS:0;5ed4808ef0e6:38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:13,780 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:13,780 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:29:13,780 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on test,,1732148938288.0c91503478896a32e34433cb639122e5. after waiting 0 ms 2024-11-21T00:29:13,780 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-21T00:29:13,780 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-21T00:29:13,792 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-21T00:29:13,792 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1325): Online Regions={0c91503478896a32e34433cb639122e5=test,,1732148938288.0c91503478896a32e34433cb639122e5., 1588230740=hbase:meta,,1.1588230740, 1ebb67afce9ecc21dff27310bd89573a=hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a.} 2024-11-21T00:29:13,792 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 0c91503478896a32e34433cb639122e5, 1588230740, 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:13,792 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-21T00:29:13,792 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-21T00:29:13,792 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-21T00:29:13,792 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-21T00:29:13,792 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-21T00:29:13,793 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.54 KB heapSize=5.53 KB 2024-11-21T00:29:13,812 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/default/test/0c91503478896a32e34433cb639122e5/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter 2024-11-21T00:29:13,817 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed test,,1732148938288.0c91503478896a32e34433cb639122e5. 
2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0c91503478896a32e34433cb639122e5: Waiting for close lock at 1732148953780Running coprocessor pre-close hooks at 1732148953780Disabling compacts and flushes for region at 1732148953780Disabling writes for close at 1732148953780Writing region close event to WAL at 1732148953802 (+22 ms)Running coprocessor post-close hooks at 1732148953817 (+15 ms)Closed at 1732148953817 2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed test,,1732148938288.0c91503478896a32e34433cb639122e5. 2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1ebb67afce9ecc21dff27310bd89573a, disabling compactions & flushes 2024-11-21T00:29:13,817 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. after waiting 0 ms 2024-11-21T00:29:13,817 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 
2024-11-21T00:29:13,817 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 1ebb67afce9ecc21dff27310bd89573a 3/3 column families, dataSize=1.83 KB heapSize=3.97 KB 2024-11-21T00:29:13,827 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/info/fba9875594fb4b5fa9dbf65c1009d9bb is 147, key is hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a./info:regioninfo/1732148942336/Put/seqid=0 2024-11-21T00:29:13,842 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/.tmp/hfileref/52c55ed6358c40aabff52e19bfc0a3a9 is 74, key is 1/hfileref:49541f8648ee422cba0743c67bf248d8_SeqId_6_/1732148948140/DeleteColumn/seqid=0 2024-11-21T00:29:13,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741847_1023 (size=7686) 2024-11-21T00:29:13,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741848_1024 (size=5489) 2024-11-21T00:29:13,863 INFO [regionserver/5ed4808ef0e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:29:13,865 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=840 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/.tmp/hfileref/52c55ed6358c40aabff52e19bfc0a3a9 2024-11-21T00:29:13,916 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/.tmp/queue/fa656f1e82594f3385a1ffdd27a41983 is 153, key is 1-5ed4808ef0e6,38737,1732148925192/queue:5ed4808ef0e6%2C38737%2C1732148925192/1732148948142/Put/seqid=0 2024-11-21T00:29:13,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741849_1025 (size=5504) 2024-11-21T00:29:13,944 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.01 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/.tmp/queue/fa656f1e82594f3385a1ffdd27a41983 2024-11-21T00:29:13,962 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/.tmp/hfileref/52c55ed6358c40aabff52e19bfc0a3a9 as 
hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/hfileref/52c55ed6358c40aabff52e19bfc0a3a9 2024-11-21T00:29:13,973 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/hfileref/52c55ed6358c40aabff52e19bfc0a3a9, entries=6, sequenceid=17, filesize=5.4 K 2024-11-21T00:29:13,975 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/.tmp/queue/fa656f1e82594f3385a1ffdd27a41983 as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/queue/fa656f1e82594f3385a1ffdd27a41983 2024-11-21T00:29:13,983 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/queue/fa656f1e82594f3385a1ffdd27a41983, entries=2, sequenceid=17, filesize=5.4 K 2024-11-21T00:29:13,984 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.83 KB/1878, heapSize ~3.69 KB/3776, currentSize=0 B/0 for 1ebb67afce9ecc21dff27310bd89573a in 167ms, sequenceid=17, compaction requested=false 2024-11-21T00:29:13,993 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 1ebb67afce9ecc21dff27310bd89573a 2024-11-21T00:29:14,012 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-21T00:29:14,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,029 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,030 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,031 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,050 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/replication/1ebb67afce9ecc21dff27310bd89573a/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=1 2024-11-21T00:29:14,051 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:29:14,051 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:29:14,051 INFO [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 2024-11-21T00:29:14,051 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1ebb67afce9ecc21dff27310bd89573a: Waiting for close lock at 1732148953817Running coprocessor pre-close hooks at 1732148953817Disabling compacts and flushes for region at 1732148953817Disabling writes for close at 1732148953817Obtaining lock to block concurrent updates at 1732148953817Preparing flush snapshotting stores in 1ebb67afce9ecc21dff27310bd89573a at 1732148953817Finished memstore snapshotting hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a., syncing WAL and waiting on mvcc, flushsize=dataSize=1878, getHeapSize=4016, getOffHeapSize=0, getCellsCount=19 at 1732148953818 (+1 ms)Flushing stores of hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. at 1732148953818Flushing 1ebb67afce9ecc21dff27310bd89573a/hfileref: creating writer at 1732148953818Flushing 1ebb67afce9ecc21dff27310bd89573a/hfileref: appending metadata at 1732148953842 (+24 ms)Flushing 1ebb67afce9ecc21dff27310bd89573a/hfileref: closing flushed file at 1732148953842Flushing 1ebb67afce9ecc21dff27310bd89573a/queue: creating writer at 1732148953895 (+53 ms)Flushing 1ebb67afce9ecc21dff27310bd89573a/queue: appending metadata at 1732148953915 (+20 ms)Flushing 1ebb67afce9ecc21dff27310bd89573a/queue: closing flushed file at 1732148953915Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d0d7f92: reopening flushed file at 1732148953957 (+42 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5b08b252: reopening flushed file at 1732148953973 (+16 ms)Finished flush of dataSize ~1.83 KB/1878, heapSize ~3.69 KB/3776, currentSize=0 B/0 for 1ebb67afce9ecc21dff27310bd89573a in 167ms, sequenceid=17, compaction requested=false at 1732148953984 (+11 ms)Writing region close event to WAL at 1732148954016 (+32 ms)Running coprocessor post-close hooks at 1732148954051 (+35 ms)Closed at 1732148954051 2024-11-21T00:29:14,051 DEBUG [RS_CLOSE_REGION-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:replication,,1732148941264.1ebb67afce9ecc21dff27310bd89573a. 
2024-11-21T00:29:14,077 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:14,102 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:29:14,102 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:14,103 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 2-5ed4808ef0e6,38737,1732148925192 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725, lastWalPosition=688, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:29:14,104 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.HRegionServer(2440): ***** ABORTING region server 5ed4808ef0e6,38737,1732148925192: Failed to operate on replication queue ***** org.apache.hadoop.hbase.replication.ReplicationException: failed to setOffset, queueId=2-5ed4808ef0e6,38737,1732148925192, walGroup=5ed4808ef0e6%2C38737%2C1732148925192, offset=5ed4808ef0e6%2C38737%2C1732148925192.1732148945725:688, lastSeqIds={} at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:159) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] 
at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:38737 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at java.lang.Thread.getStackTrace(Thread.java:1619) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.setStackTrace(FutureUtils.java:144) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.rethrow(FutureUtils.java:163) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.get(FutureUtils.java:186) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.lambda$logPositionAndCleanOldLogs$7(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:589) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] at --------Future.get--------(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.mutate(ClientProtos.java:43836) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$mutate$0(RawAsyncTableImpl.java:180) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.call(ConnectionUtils.java:619) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.mutate(RawAsyncTableImpl.java:179) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.voidMutate(RawAsyncTableImpl.java:186) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$8(RawAsyncTableImpl.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] 
at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.lambda$put$9(RawAsyncTableImpl.java:257) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.tracedFuture(TraceUtil.java:86) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.put(RawAsyncTableImpl.java:254) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.TableReplicationQueueStorage.setOffset(TableReplicationQueueStorage.java:132) ~[hbase-replication-3.0.0-beta-2-SNAPSHOT.jar:?] ... 7 more 2024-11-21T00:29:14,106 ERROR [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter, org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver, org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-21T00:29:14,106 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-21T00:29:14,106 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-21T00:29:14,106 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-21T00:29:14,107 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-21T00:29:14,107 INFO [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": 
"sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1765801984, "init": 1048576000, "max": 2306867200, "used": 1204720048 }, "NonHeapMemoryUsage": { "committed": 213778432, "init": 7667712, "max": -1, "used": 209379224 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [], "beans": [], "beans": [] } 2024-11-21T00:29:14,107 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.HRegionServer(2470): Unable to report fatal error to master org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:40563 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:344) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportRSFatalError(RegionServerStatusProtos.java:17290) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.abort(HRegionServer.java:2467) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abortRegionServer(SingleProcessHBaseCluster.java:192) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$2.run(SingleProcessHBaseCluster.java:185) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at java.security.AccessController.doPrivileged(AccessController.java:399) ~[?:?] at javax.security.auth.Subject.doAs(Subject.java:376) ~[?:?] at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.abort(SingleProcessHBaseCluster.java:182) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.interruptOrAbortWhenFail(ReplicationSourceManager.java:604) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.logPositionAndCleanOldLogs(ReplicationSourceManager.java:647) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceInterface.logPositionAndCleanOldLogs(ReplicationSourceInterface.java:211) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.updateLogPosition(ReplicationSourceShipper.java:266) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.shipEdits(ReplicationSourceShipper.java:158) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.run(ReplicationSourceShipper.java:119) ~[classes/:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=5ed4808ef0e6:40563 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.GeneratedConstructorAccessor136.newInstance(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-21T00:29:14,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,111 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,112 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,122 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,133 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,174 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-21T00:29:14,193 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:14,222 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:29:14,247 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.19 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/info/fba9875594fb4b5fa9dbf65c1009d9bb 2024-11-21T00:29:14,284 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/ns/06997c87542c4613890fb7f69389a570 is 43, key is default/ns:d/1732148927120/Put/seqid=0 2024-11-21T00:29:14,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741850_1026 (size=5153) 2024-11-21T00:29:14,299 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-21T00:29:14,299 INFO [regionserver/5ed4808ef0e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-21T00:29:14,343 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:14,393 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:14,593 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:14,652 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:14,667 DEBUG 
[RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 393, reset compression=false 2024-11-21T00:29:14,704 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/ns/06997c87542c4613890fb7f69389a570 2024-11-21T00:29:14,708 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceWALReader(177): Read 0 WAL entries eligible for replication 2024-11-21T00:29:14,708 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:14,708 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.ReplicationSourceShipper(110): Shipper from source 1-5ed4808ef0e6,38737,1732148925192 got entry batch from reader: WALEntryBatch [walEntries=[], lastWalPath=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725, lastWalPosition=688, nbRowKeys=0, nbHFiles=0, heapSize=0, lastSeqIds={}, endOfFile=false,usedBufferSize=0] 2024-11-21T00:29:14,709 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.HRegionServer(2435): Abort already in progress. 
Ignoring the current request with reason: Failed to operate on replication queue 2024-11-21T00:29:14,769 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/rep_barrier/162ff227058e4b7a88ce2c24e6d8cb2e is 112, key is test,,1732148938288.0c91503478896a32e34433cb639122e5./rep_barrier:seqnumDuringOpen/1732148938864/Put/seqid=0 2024-11-21T00:29:14,793 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-21T00:29:14,793 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-21T00:29:14,793 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:14,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741851_1027 (size=5518) 2024-11-21T00:29:14,912 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-21T00:29:14,945 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:14,994 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:15,082 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:15,194 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:15,195 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/rep_barrier/162ff227058e4b7a88ce2c24e6d8cb2e 2024-11-21T00:29:15,222 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/table/09d89716b236403c86e7cea495b77eee is 53, key is hbase:replication/table:state/1732148942344/Put/seqid=0 2024-11-21T00:29:15,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741852_1028 (size=5308) 2024-11-21T00:29:15,256 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/table/09d89716b236403c86e7cea495b77eee 2024-11-21T00:29:15,280 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:15,293 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/info/fba9875594fb4b5fa9dbf65c1009d9bb as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/info/fba9875594fb4b5fa9dbf65c1009d9bb 2024-11-21T00:29:15,322 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/info/fba9875594fb4b5fa9dbf65c1009d9bb, entries=20, sequenceid=16, filesize=7.5 K 2024-11-21T00:29:15,335 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/ns/06997c87542c4613890fb7f69389a570 as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/ns/06997c87542c4613890fb7f69389a570 2024-11-21T00:29:15,373 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/ns/06997c87542c4613890fb7f69389a570, entries=2, sequenceid=16, filesize=5.0 K 2024-11-21T00:29:15,381 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/rep_barrier/162ff227058e4b7a88ce2c24e6d8cb2e as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/rep_barrier/162ff227058e4b7a88ce2c24e6d8cb2e 2024-11-21T00:29:15,389 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/rep_barrier/162ff227058e4b7a88ce2c24e6d8cb2e, entries=1, sequenceid=16, filesize=5.4 K 2024-11-21T00:29:15,391 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/.tmp/table/09d89716b236403c86e7cea495b77eee as 
hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/table/09d89716b236403c86e7cea495b77eee 2024-11-21T00:29:15,396 DEBUG [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-21T00:29:15,404 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/table/09d89716b236403c86e7cea495b77eee, entries=4, sequenceid=16, filesize=5.2 K 2024-11-21T00:29:15,405 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1613ms, sequenceid=16, compaction requested=false 2024-11-21T00:29:15,405 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-21T00:29:15,462 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/data/hbase/meta/1588230740/recovered.edits/19.seqid, newMaxSeqId=19, maxSeqId=1 2024-11-21T00:29:15,462 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.replication.regionserver.ReplicationObserver 2024-11-21T00:29:15,462 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-21T00:29:15,462 INFO [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-21T00:29:15,462 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732148953792Running coprocessor pre-close hooks at 1732148953792Disabling compacts and flushes for region at 1732148953792Disabling writes for close at 1732148953792Obtaining lock to block concurrent updates at 1732148953793 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732148953793Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=2599, getHeapSize=5600, getOffHeapSize=0, getCellsCount=27 at 1732148953793Flushing stores of hbase:meta,,1.1588230740 at 1732148953793Flushing 1588230740/info: creating writer at 1732148953793Flushing 1588230740/info: appending metadata at 1732148953827 (+34 ms)Flushing 1588230740/info: closing flushed file at 1732148953827Flushing 1588230740/ns: creating writer at 1732148954252 (+425 ms)Flushing 1588230740/ns: appending metadata at 1732148954283 (+31 ms)Flushing 1588230740/ns: closing flushed file at 1732148954284 (+1 ms)Flushing 1588230740/rep_barrier: creating writer at 1732148954735 (+451 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732148954768 (+33 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732148954768Flushing 1588230740/table: creating writer at 1732148955202 (+434 ms)Flushing 1588230740/table: appending metadata at 1732148955222 (+20 ms)Flushing 1588230740/table: closing flushed file at 
1732148955222Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6c347272: reopening flushed file at 1732148955288 (+66 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@76d6356: reopening flushed file at 1732148955322 (+34 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a16aa6f: reopening flushed file at 1732148955373 (+51 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2126722d: reopening flushed file at 1732148955389 (+16 ms)Finished flush of dataSize ~2.54 KB/2599, heapSize ~5.47 KB/5600, currentSize=0 B/0 for 1588230740 in 1613ms, sequenceid=16, compaction requested=false at 1732148955405 (+16 ms)Writing region close event to WAL at 1732148955459 (+54 ms)Running coprocessor post-close hooks at 1732148955462 (+3 ms)Closed at 1732148955462 2024-11-21T00:29:15,462 DEBUG [RS_CLOSE_META-regionserver/5ed4808ef0e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-21T00:29:15,589 DEBUG [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] regionserver.WALEntryStream(222): Reset reader hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/WALs/5ed4808ef0e6,38737,1732148925192/5ed4808ef0e6%2C38737%2C1732148925192.1732148945725 to pos 688, reset compression=false 2024-11-21T00:29:15,596 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(976): stopping server 5ed4808ef0e6,38737,1732148925192; all regions closed. 2024-11-21T00:29:15,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741842_1018 (size=859) 2024-11-21T00:29:15,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741843_1019 (size=2791) 2024-11-21T00:29:15,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741844_1020 (size=696) 2024-11-21T00:29:15,619 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:15,619 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.LeaseManager(133): Closed leases 2024-11-21T00:29:15,619 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:29:15,620 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.ChoreService(370): Chore service for: regionserver/5ed4808ef0e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-21T00:29:15,620 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:29:15,620 INFO [regionserver/5ed4808ef0e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-21T00:29:15,620 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.ReplicationSource(687): peerId=1, Closing source 1-5ed4808ef0e6,38737,1732148925192 because: Region server is closing 2024-11-21T00:29:15,620 INFO [RS:0;5ed4808ef0e6:38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:38737. 
2024-11-21T00:29:15,620 DEBUG [RS:0;5ed4808ef0e6:38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:15,620 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:15,621 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:15,621 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-21T00:29:15,724 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.ReplicationSource(739): peerId=1, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,1-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,1-5ed4808ef0e6,38737,1732148925192 terminated 2024-11-21T00:29:15,724 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.ReplicationSource(687): peerId=2, Closing source 2-5ed4808ef0e6,38737,1732148925192 because: Region server is closing 2024-11-21T00:29:15,724 INFO [RS:0;5ed4808ef0e6:38737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;5ed4808ef0e6:38737. 
2024-11-21T00:29:15,724 DEBUG [RS:0;5ed4808ef0e6:38737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.disconnect(HBaseReplicationEndpoint.java:117) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.doStop(HBaseReplicationEndpoint.java:143) at org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService.stopAsync(AbstractService.java:285) at org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.stop(HBaseReplicationEndpoint.java:133) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:708) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:682) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:677) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSource.terminate(ReplicationSource.java:672) at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager.join(ReplicationSourceManager.java:987) at org.apache.hadoop.hbase.replication.regionserver.Replication.stopReplicationService(Replication.java:160) at org.apache.hadoop.hbase.regionserver.HRegionServer.stopServiceThreads(HRegionServer.java:2532) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1011) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-21T00:29:15,724 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:15,725 DEBUG [RS:0;5ed4808ef0e6:38737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-21T00:29:15,725 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-21T00:29:15,825 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.ReplicationSource(739): peerId=2, ReplicationSourceWorker RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.shipper5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 terminated 2024-11-21T00:29:15,825 INFO [RS:0;5ed4808ef0e6:38737 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38737 2024-11-21T00:29:15,825 WARN [RS_REFRESH_PEER-regionserver/5ed4808ef0e6:0-0.replicationSource,2-5ed4808ef0e6,38737,1732148925192.replicationSource.wal-reader.5ed4808ef0e6%2C38737%2C1732148925192,2-5ed4808ef0e6,38737,1732148925192 {}] util.Threads(127): sleep interrupted java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.hbase.util.Threads.sleep(Threads.java:125) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.sleep(ReplicationSourceWALReader.java:130) ~[classes/:?] at org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceWALReader.run(ReplicationSourceWALReader.java:162) ~[classes/:?] 2024-11-21T00:29:15,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01646092936/rs/5ed4808ef0e6,38737,1732148925192 2024-11-21T00:29:15,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936/rs 2024-11-21T00:29:15,836 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:29:15,837 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5ed4808ef0e6,38737,1732148925192] 2024-11-21T00:29:15,847 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /01646092936/draining/5ed4808ef0e6,38737,1732148925192 already deleted, retry=false 2024-11-21T00:29:15,847 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 5ed4808ef0e6,38737,1732148925192 expired; onlineServers=0 2024-11-21T00:29:15,847 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '5ed4808ef0e6,40563,1732148924943' ***** 2024-11-21T00:29:15,847 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-21T00:29:15,847 INFO [M:0;5ed4808ef0e6:40563 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-21T00:29:15,847 INFO [M:0;5ed4808ef0e6:40563 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-21T00:29:15,847 DEBUG [M:0;5ed4808ef0e6:40563 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-21T00:29:15,847 DEBUG [M:0;5ed4808ef0e6:40563 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-21T00:29:15,847 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-21T00:29:15,847 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148926151 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.small.0-1732148926151,5,FailOnTimeoutGroup] 2024-11-21T00:29:15,847 INFO [M:0;5ed4808ef0e6:40563 {}] hbase.ChoreService(370): Chore service for: master/5ed4808ef0e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-21T00:29:15,847 INFO [M:0;5ed4808ef0e6:40563 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-21T00:29:15,847 DEBUG [M:0;5ed4808ef0e6:40563 {}] master.HMaster(1795): Stopping service threads 2024-11-21T00:29:15,847 INFO [M:0;5ed4808ef0e6:40563 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-21T00:29:15,847 INFO [M:0;5ed4808ef0e6:40563 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-21T00:29:15,847 ERROR [M:0;5ed4808ef0e6:40563 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (1040632728) connection to localhost/127.0.0.1:38659 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:38659,5,PEWorkerGroup] 2024-11-21T00:29:15,848 INFO [M:0;5ed4808ef0e6:40563 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-21T00:29:15,848 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-21T00:29:15,848 DEBUG [master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148926151 {}] cleaner.HFileCleaner(306): Exit Thread[master/5ed4808ef0e6:0:becomeActiveMaster-HFileCleaner.large.0-1732148926151,5,FailOnTimeoutGroup] 2024-11-21T00:29:15,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/01646092936/master 2024-11-21T00:29:15,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/01646092936 2024-11-21T00:29:15,869 DEBUG [M:0;5ed4808ef0e6:40563 {}] zookeeper.RecoverableZooKeeper(212): Node /01646092936/master already deleted, retry=false 2024-11-21T00:29:15,869 DEBUG [M:0;5ed4808ef0e6:40563 {}] master.ActiveMasterManager(353): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Failed delete of our master address node; KeeperErrorCode = NoNode for /01646092936/master 2024-11-21T00:29:15,871 INFO [M:0;5ed4808ef0e6:40563 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/.lastflushedseqids 2024-11-21T00:29:15,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741853_1029 (size=263) 2024-11-21T00:29:15,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:15,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:38737-0x1015acb039c0001, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:15,958 INFO [RS:0;5ed4808ef0e6:38737 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:29:15,958 INFO [RS:0;5ed4808ef0e6:38737 {}] regionserver.HRegionServer(1031): Exiting; stopping=5ed4808ef0e6,38737,1732148925192; zookeeper connection closed. 2024-11-21T00:29:15,958 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@226b536b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@226b536b 2024-11-21T00:29:15,959 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-21T00:29:16,332 INFO [M:0;5ed4808ef0e6:40563 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-21T00:29:16,332 INFO [M:0;5ed4808ef0e6:40563 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-21T00:29:16,333 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-21T00:29:16,333 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:16,333 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-21T00:29:16,333 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-21T00:29:16,333 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-21T00:29:16,333 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=59.83 KB heapSize=70.51 KB
2024-11-21T00:29:16,356 DEBUG [M:0;5ed4808ef0e6:40563 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9a2173dfa824b8dadc093dc2ec70ceb is 82, key is hbase:meta,,1/info:regioninfo/1732148926973/Put/seqid=0
2024-11-21T00:29:16,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741854_1030 (size=5672)
2024-11-21T00:29:16,371 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9a2173dfa824b8dadc093dc2ec70ceb
2024-11-21T00:29:16,417 DEBUG [M:0;5ed4808ef0e6:40563 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25cc5483efa04d95b0762d3bfaf93580 is 1480, key is \x00\x00\x00\x00\x00\x00\x00\x08/proc:d/1732148942347/Put/seqid=0
2024-11-21T00:29:16,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741855_1031 (size=8909)
2024-11-21T00:29:16,835 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.28 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25cc5483efa04d95b0762d3bfaf93580
2024-11-21T00:29:16,869 DEBUG [M:0;5ed4808ef0e6:40563 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a111678d8474eb1b74807e915511688 is 69, key is 5ed4808ef0e6,38737,1732148925192/rs:state/1732148926267/Put/seqid=0
2024-11-21T00:29:16,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741856_1032 (size=5156)
2024-11-21T00:29:16,895 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a111678d8474eb1b74807e915511688
2024-11-21T00:29:16,902 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a9a2173dfa824b8dadc093dc2ec70ceb as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a9a2173dfa824b8dadc093dc2ec70ceb
2024-11-21T00:29:16,911 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a9a2173dfa824b8dadc093dc2ec70ceb, entries=8, sequenceid=112, filesize=5.5 K
2024-11-21T00:29:16,911 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/25cc5483efa04d95b0762d3bfaf93580 as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/25cc5483efa04d95b0762d3bfaf93580
2024-11-21T00:29:16,917 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/25cc5483efa04d95b0762d3bfaf93580, entries=13, sequenceid=112, filesize=8.7 K
2024-11-21T00:29:16,917 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5a111678d8474eb1b74807e915511688 as hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a111678d8474eb1b74807e915511688
2024-11-21T00:29:16,923 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38659/user/jenkins/test-data/561ac99b-2cef-3d34-0f35-dceabdbd2dfd/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5a111678d8474eb1b74807e915511688, entries=1, sequenceid=112, filesize=5.0 K
2024-11-21T00:29:16,924 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(3140): Finished flush of dataSize ~59.83 KB/61268, heapSize ~70.21 KB/71896, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 590ms, sequenceid=112, compaction requested=false
2024-11-21T00:29:16,932 INFO [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-21T00:29:16,933 DEBUG [M:0;5ed4808ef0e6:40563 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732148956333Disabling compacts and flushes for region at 1732148956333Disabling writes for close at 1732148956333Obtaining lock to block concurrent updates at 1732148956333Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732148956333Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=61268, getHeapSize=72136, getOffHeapSize=0, getCellsCount=131 at 1732148956333Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732148956334 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732148956334Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732148956355 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732148956355Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732148956376 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732148956417 (+41 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732148956417Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732148956840 (+423 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732148956869 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732148956869Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fe8a66: reopening flushed file at 1732148956901 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73cff4b5: reopening flushed file at 1732148956911 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14357b48: reopening flushed file at 1732148956917 (+6 ms)Finished flush of dataSize ~59.83 KB/61268, heapSize ~70.21 KB/71896, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 590ms, sequenceid=112, compaction requested=false at 1732148956924 (+7 ms)Writing region close event to WAL at 1732148956932 (+8 ms)Closed at 1732148956932
2024-11-21T00:29:16,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40739 is added to blk_1073741830_1006 (size=69055)
2024-11-21T00:29:16,942 INFO [M:0;5ed4808ef0e6:40563 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-21T00:29:16,943 INFO [M:0;5ed4808ef0e6:40563 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40563
2024-11-21T00:29:16,943 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
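[Editor's note] The "Committing .tmp/... as ..." and "Added ..." records above show the flush commit step: each column family's flushed data is first written to a file under the store's .tmp directory and is only then moved into the store directory. The following is a minimal, hypothetical sketch of that write-then-rename pattern using the Hadoop FileSystem API; it is not HBase's actual HRegionFileSystem code, and the class name and paths are invented for illustration.

// TmpThenCommitSketch.java -- illustrative only; assumes hadoop-common on the classpath.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmpFile = new Path("/demo/store/.tmp/flush-0001");   // hypothetical temp location
    Path finalFile = new Path("/demo/store/info/flush-0001"); // hypothetical committed location

    // 1. Write the flushed data to a temporary file first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.writeBytes("flushed cells would go here\n");
    }

    // 2. Commit by moving the finished file into the store directory with a single rename,
    //    so readers never observe a partially written file.
    fs.mkdirs(finalFile.getParent());
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    System.out.println("Committed " + tmpFile + " as " + finalFile);
  }
}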
2024-11-21T00:29:16,943 INFO [M:0;5ed4808ef0e6:40563 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-21T00:29:17,056 INFO [M:0;5ed4808ef0e6:40563 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-21T00:29:17,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:17,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40563-0x1015acb039c0000, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-21T00:29:17,064 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64681ff6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-21T00:29:17,065 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e3bc62f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:29:17,065 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:29:17,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@225b0623{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:29:17,065 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79393f7f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.log.dir/,STOPPED} 2024-11-21T00:29:17,066 WARN [BP-613913888-172.17.0.2-1732148922416 heartbeating to localhost/127.0.0.1:38659 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-21T00:29:17,066 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-21T00:29:17,066 WARN [BP-613913888-172.17.0.2-1732148922416 heartbeating to localhost/127.0.0.1:38659 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-613913888-172.17.0.2-1732148922416 (Datanode Uuid 92233fbd-f731-4ba2-ab54-9380f3ce15bc) service to localhost/127.0.0.1:38659 2024-11-21T00:29:17,066 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-21T00:29:17,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7/data/data1/current/BP-613913888-172.17.0.2-1732148922416 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:29:17,067 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/cluster_f9231599-d8fd-64ce-6d07-bed378d1c0e7/data/data2/current/BP-613913888-172.17.0.2-1732148922416 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-21T00:29:17,067 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-21T00:29:17,073 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@12e42a7e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-21T00:29:17,074 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@175d26d5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-21T00:29:17,074 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-21T00:29:17,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28070112{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-21T00:29:17,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d3ef9ba{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/16926423-955b-e581-6f21-4a09194a24d6/hadoop.log.dir/,STOPPED} 2024-11-21T00:29:17,080 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-21T00:29:17,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-21T00:29:17,154 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: replication.TestMasterReplication#testHFileMultiSlaveReplication Thread=837 (was 765) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:43965 from jenkins.hfs.23 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:43965 from jenkins.hfs.23 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:38659 from jenkins.hfs.23 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-50-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:43965 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-49-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.21@localhost:38659 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-49-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:38659 from jenkins.hfs.21 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38659 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-46-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HMaster-EventLoopGroup-45-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-50-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-49-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:43965 from jenkins 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-45-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46101 from jenkins.hfs.22 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46101 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-45-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38659 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38659 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46101 from jenkins.hfs.22 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.23@localhost:43965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38659 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38659 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38659 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38659 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:38659 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:38659 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46101 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-47-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46101 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-48-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62972) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38659 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-48-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:38659 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:38659 from jenkins.hfs.22 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.22@localhost:46101 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-47-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62972) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: IPC Client (1040632728) connection to localhost/127.0.0.1:46101 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-48-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-50-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-46-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test-SendThread(127.0.0.1:62972) java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.ClientCnxnSocketNIO.cleanup(ClientCnxnSocketNIO.java:214) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanup(ClientCnxn.java:1395) app//org.apache.zookeeper.ClientCnxn$SendThread.cleanAndNotifyState(ClientCnxn.java:1336) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1309) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-46-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-47-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1128 (was 1016) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1110 (was 1052) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=1326 (was 1816)
2024-11-21T00:29:17,155 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=837 is superior to 500
2024-11-21T00:29:17,155 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1128 is superior to 1024
2024-11-21T00:29:17,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster0-0x1015acb039c0002, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-21T00:29:17,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster2-0x1015acb039c0008, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-21T00:29:17,178 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster0-0x1015acb039c0002, quorum=127.0.0.1:62972, baseZNode=/01646092936 Received Disconnected from ZooKeeper, ignoring
2024-11-21T00:29:17,178 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster2-0x1015acb039c0008, quorum=127.0.0.1:62972, baseZNode=/2-1051660059 Received Disconnected from ZooKeeper, ignoring
2024-11-21T00:29:17,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): cluster1-0x1015acb039c0005, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received ZooKeeper Event, type=None, state=Disconnected, path=null
2024-11-21T00:29:17,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(640): cluster1-0x1015acb039c0005, quorum=127.0.0.1:62972, baseZNode=/1702889742 Received Disconnected from ZooKeeper, ignoring
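[Editor's note] The ResourceChecker records above compare thread and file-descriptor counts before and after the test (Thread=837 was 765, OpenFileDescriptor=1128 was 1016) and warn when a count exceeds a fixed limit (500 threads, 1024 descriptors). The sketch below is a minimal, self-contained illustration of that kind of before/after threshold check; it is not the actual org.apache.hadoop.hbase.ResourceChecker, and the method names and message formats are assumptions chosen to mirror the log output.

// ResourceCheckSketch.java -- illustrative only; plain JDK, no HBase dependency.
public class ResourceCheckSketch {

  // Report the current value, the previous value, and flag a possible leak if it grew.
  static void compare(String name, long before, long after) {
    System.out.println(name + "=" + after + " (was " + before + ")"
        + (after > before ? " - " + name + " LEAK? -" : ""));
  }

  // Emit a WARN-style line when a value exceeds its configured maximum.
  static void check(String name, long value, long max) {
    if (value > max) {
      System.out.println("WARN " + name + "=" + value + " is superior to " + max);
    }
  }

  public static void main(String[] args) {
    // Values taken from the log above; the thresholds match the WARN lines it contains.
    compare("Thread", 765, 837);
    compare("OpenFileDescriptor", 1016, 1128);
    check("Thread", 837, 500);
    check("OpenFileDescriptor", 1128, 1024);
  }
}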